From 6392e00653d3b81062ef60d8ae2fa2621873533f Mon Sep 17 00:00:00 2001
From: Dominik Süß <dominik@suess.wtf>
Date: Tue, 9 May 2023 19:19:48 +0200
Subject: [PATCH] feat: initial tracing support (#1623)

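This patch threads OpenTelemetry tracing through the server: request
spans via the vendored otelgin middleware, database spans via
bunotel/otelsql on the bundb layer, and export over OTLP gRPC or the
Jaeger agent. As a rough sketch of the configuration surface this diff
appears to add (option names and values are inferred from the config
and docs changes in the diffstat below, so treat them as assumptions
rather than the authoritative reference):

    # config.yaml (illustrative only; option names inferred from this diff)
    tracing-enabled: true               # tracing stays off unless enabled
    tracing-transport: "grpc"           # OTLP over gRPC; a "jaeger" exporter is also vendored
    tracing-endpoint: "localhost:4317"  # standard OTLP gRPC collector port
    tracing-insecure-transport: true    # plain-text gRPC, e.g. for a local collector
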
---
 cmd/gotosocial/action/server/server.go        |   22 +-
 cmd/gotosocial/action/testrig/testrig.go      |   20 +-
 docs/configuration/observability.md           |   26 +
 go.mod                                        |   18 +
 go.sum                                        |   73 +-
 internal/config/config.go                     |    7 +-
 internal/config/defaults.go                   |    5 +
 internal/config/helpers.gen.go                |  100 +
 internal/db/bundb/bundb.go                    |    5 +
 internal/middleware/requestid.go              |    3 +-
 internal/tracing/no_trace.go                  |   43 +
 internal/tracing/tracing.go                   |  175 +
 test/envparsing.sh                            |    5 +
 testrig/config.go                             |    5 +
 .../github.com/cenkalti/backoff/v4/.gitignore |   25 +
 vendor/github.com/cenkalti/backoff/v4/LICENSE |   20 +
 .../github.com/cenkalti/backoff/v4/README.md  |   32 +
 .../github.com/cenkalti/backoff/v4/backoff.go |   66 +
 .../github.com/cenkalti/backoff/v4/context.go |   62 +
 .../cenkalti/backoff/v4/exponential.go        |  161 +
 .../github.com/cenkalti/backoff/v4/retry.go   |  146 +
 .../github.com/cenkalti/backoff/v4/ticker.go  |   97 +
 .../github.com/cenkalti/backoff/v4/timer.go   |   35 +
 .../github.com/cenkalti/backoff/v4/tries.go   |   38 +
 vendor/github.com/go-logr/logr/.golangci.yaml |   29 +
 vendor/github.com/go-logr/logr/CHANGELOG.md   |    6 +
 .../github.com/go-logr/logr/CONTRIBUTING.md   |   17 +
 vendor/github.com/go-logr/logr/LICENSE        |  201 +
 vendor/github.com/go-logr/logr/README.md      |  282 ++
 vendor/github.com/go-logr/logr/discard.go     |   54 +
 vendor/github.com/go-logr/logr/funcr/funcr.go |  787 ++++
 vendor/github.com/go-logr/logr/logr.go        |  510 +++
 vendor/github.com/go-logr/stdr/LICENSE        |  201 +
 vendor/github.com/go-logr/stdr/README.md      |    6 +
 vendor/github.com/go-logr/stdr/stdr.go        |  170 +
 .../golang/protobuf/jsonpb/decode.go          |  524 +++
 .../golang/protobuf/jsonpb/encode.go          |  559 +++
 .../github.com/golang/protobuf/jsonpb/json.go |   69 +
 .../github.com/golang/protobuf/ptypes/any.go  |  179 +
 .../golang/protobuf/ptypes/any/any.pb.go      |   62 +
 .../github.com/golang/protobuf/ptypes/doc.go  |   10 +
 .../golang/protobuf/ptypes/duration.go        |   76 +
 .../protobuf/ptypes/duration/duration.pb.go   |   63 +
 .../golang/protobuf/ptypes/timestamp.go       |  112 +
 .../protobuf/ptypes/timestamp/timestamp.pb.go |   64 +
 .../grpc-gateway/v2/LICENSE.txt               |   27 +
 .../v2/internal/httprule/BUILD.bazel          |   35 +
 .../v2/internal/httprule/compile.go           |  121 +
 .../grpc-gateway/v2/internal/httprule/fuzz.go |   11 +
 .../v2/internal/httprule/parse.go             |  368 ++
 .../v2/internal/httprule/types.go             |   60 +
 .../grpc-gateway/v2/runtime/BUILD.bazel       |   91 +
 .../grpc-gateway/v2/runtime/context.go        |  345 ++
 .../grpc-gateway/v2/runtime/convert.go        |  322 ++
 .../grpc-gateway/v2/runtime/doc.go            |    5 +
 .../grpc-gateway/v2/runtime/errors.go         |  180 +
 .../grpc-gateway/v2/runtime/fieldmask.go      |  165 +
 .../grpc-gateway/v2/runtime/handler.go        |  223 ++
 .../v2/runtime/marshal_httpbodyproto.go       |   32 +
 .../grpc-gateway/v2/runtime/marshal_json.go   |   45 +
 .../grpc-gateway/v2/runtime/marshal_jsonpb.go |  344 ++
 .../grpc-gateway/v2/runtime/marshal_proto.go  |   63 +
 .../grpc-gateway/v2/runtime/marshaler.go      |   50 +
 .../v2/runtime/marshaler_registry.go          |  109 +
 .../grpc-gateway/v2/runtime/mux.go            |  356 ++
 .../grpc-gateway/v2/runtime/pattern.go        |  383 ++
 .../grpc-gateway/v2/runtime/proto2_convert.go |   80 +
 .../grpc-gateway/v2/runtime/query.go          |  329 ++
 .../grpc-gateway/v2/utilities/BUILD.bazel     |   27 +
 .../grpc-gateway/v2/utilities/doc.go          |    2 +
 .../grpc-gateway/v2/utilities/pattern.go      |   22 +
 .../v2/utilities/readerfactory.go             |   20 +
 .../grpc-gateway/v2/utilities/trie.go         |  174 +
 .../uptrace/bun/extra/bunotel/LICENSE         |   24 +
 .../uptrace/bun/extra/bunotel/README.md       |    3 +
 .../uptrace/bun/extra/bunotel/option.go       |   32 +
 .../uptrace/bun/extra/bunotel/otel.go         |  188 +
 .../uptrace/bun/extra/bunotel/safe.go         |   11 +
 .../uptrace/bun/extra/bunotel/unsafe.go       |   18 +
 .../otelsql/.golangci.yml                     |    5 +
 .../opentelemetry-go-extra/otelsql/LICENSE    |   24 +
 .../opentelemetry-go-extra/otelsql/README.md  |  118 +
 .../opentelemetry-go-extra/otelsql/driver.go  |  460 +++
 .../opentelemetry-go-extra/otelsql/otel.go    |  254 ++
 .../opentelemetry-go-extra/otelsql/stmt.go    |  120 +
 .../opentelemetry-go-extra/otelsql/tx.go      |   38 +
 .../opentelemetry-go-extra/otelsql/version.go |    6 +
 .../github.com/gin-gonic/gin/otelgin/LICENSE  |  201 +
 .../github.com/gin-gonic/gin/otelgin/doc.go   |   21 +
 .../gin-gonic/gin/otelgin/gintrace.go         |  142 +
 .../gin-gonic/gin/otelgin/option.go           |   90 +
 .../gin-gonic/gin/otelgin/version.go          |   26 +
 .../go.opentelemetry.io/otel/.gitattributes   |    3 +
 vendor/go.opentelemetry.io/otel/.gitignore    |   21 +
 vendor/go.opentelemetry.io/otel/.gitmodules   |    3 +
 vendor/go.opentelemetry.io/otel/.golangci.yml |  244 ++
 vendor/go.opentelemetry.io/otel/.lycheeignore |    6 +
 .../otel/.markdownlint.yaml                   |   29 +
 vendor/go.opentelemetry.io/otel/CHANGELOG.md  | 2369 ++++++++++++
 vendor/go.opentelemetry.io/otel/CODEOWNERS    |   17 +
 .../go.opentelemetry.io/otel/CONTRIBUTING.md  |  526 +++
 vendor/go.opentelemetry.io/otel/LICENSE       |  201 +
 vendor/go.opentelemetry.io/otel/Makefile      |  227 ++
 vendor/go.opentelemetry.io/otel/README.md     |  114 +
 vendor/go.opentelemetry.io/otel/RELEASING.md  |  127 +
 vendor/go.opentelemetry.io/otel/VERSIONING.md |  224 ++
 .../go.opentelemetry.io/otel/attribute/doc.go |   16 +
 .../otel/attribute/encoder.go                 |  146 +
 .../otel/attribute/iterator.go                |  161 +
 .../go.opentelemetry.io/otel/attribute/key.go |  134 +
 .../go.opentelemetry.io/otel/attribute/kv.go  |   86 +
 .../go.opentelemetry.io/otel/attribute/set.go |  424 +++
 .../otel/attribute/type_string.go             |   31 +
 .../otel/attribute/value.go                   |  270 ++
 .../otel/baggage/baggage.go                   |  570 +++
 .../otel/baggage/context.go                   |   39 +
 .../go.opentelemetry.io/otel/baggage/doc.go   |   20 +
 .../go.opentelemetry.io/otel/codes/codes.go   |  116 +
 vendor/go.opentelemetry.io/otel/codes/doc.go  |   21 +
 vendor/go.opentelemetry.io/otel/doc.go        |   34 +
 .../go.opentelemetry.io/otel/error_handler.go |   38 +
 .../otel/exporters/jaeger/LICENSE             |  201 +
 .../otel/exporters/jaeger/README.md           |   50 +
 .../otel/exporters/jaeger/agent.go            |  213 ++
 .../otel/exporters/jaeger/doc.go              |   16 +
 .../otel/exporters/jaeger/env.go              |   44 +
 .../gen-go/agent/GoUnusedProtection__.go      |    6 +
 .../internal/gen-go/agent/agent-consts.go     |   27 +
 .../jaeger/internal/gen-go/agent/agent.go     |  412 ++
 .../gen-go/jaeger/GoUnusedProtection__.go     |    6 +
 .../internal/gen-go/jaeger/jaeger-consts.go   |   22 +
 .../jaeger/internal/gen-go/jaeger/jaeger.go   | 3022 +++++++++++++++
 .../gen-go/zipkincore/GoUnusedProtection__.go |    6 +
 .../gen-go/zipkincore/zipkincore-consts.go    |   39 +
 .../internal/gen-go/zipkincore/zipkincore.go  | 2067 ++++++++++
 .../internal/third_party/thrift/LICENSE       |  306 ++
 .../jaeger/internal/third_party/thrift/NOTICE |    5 +
 .../lib/go/thrift/application_exception.go    |  180 +
 .../thrift/lib/go/thrift/binary_protocol.go   |  555 +++
 .../lib/go/thrift/buffered_transport.go       |   99 +
 .../thrift/lib/go/thrift/client.go            |  109 +
 .../thrift/lib/go/thrift/compact_protocol.go  |  865 +++++
 .../thrift/lib/go/thrift/configuration.go     |  378 ++
 .../thrift/lib/go/thrift/context.go           |   24 +
 .../thrift/lib/go/thrift/debug_protocol.go    |  447 +++
 .../thrift/lib/go/thrift/deserializer.go      |  121 +
 .../thrift/lib/go/thrift/exception.go         |  116 +
 .../thrift/lib/go/thrift/framed_transport.go  |  223 ++
 .../thrift/lib/go/thrift/header_context.go    |  110 +
 .../thrift/lib/go/thrift/header_protocol.go   |  351 ++
 .../thrift/lib/go/thrift/header_transport.go  |  809 ++++
 .../thrift/lib/go/thrift/http_client.go       |  257 ++
 .../thrift/lib/go/thrift/http_transport.go    |   74 +
 .../lib/go/thrift/iostream_transport.go       |  222 ++
 .../thrift/lib/go/thrift/json_protocol.go     |  591 +++
 .../thrift/lib/go/thrift/logger.go            |   69 +
 .../thrift/lib/go/thrift/memory_buffer.go     |   80 +
 .../thrift/lib/go/thrift/messagetype.go       |   31 +
 .../thrift/lib/go/thrift/middleware.go        |  109 +
 .../lib/go/thrift/multiplexed_protocol.go     |  237 ++
 .../thrift/lib/go/thrift/numeric.go           |  164 +
 .../thrift/lib/go/thrift/pointerize.go        |   52 +
 .../thrift/lib/go/thrift/processor_factory.go |   80 +
 .../thrift/lib/go/thrift/protocol.go          |  177 +
 .../lib/go/thrift/protocol_exception.go       |  104 +
 .../thrift/lib/go/thrift/protocol_factory.go  |   25 +
 .../thrift/lib/go/thrift/response_helper.go   |   94 +
 .../thrift/lib/go/thrift/rich_transport.go    |   71 +
 .../thrift/lib/go/thrift/serializer.go        |  136 +
 .../thrift/lib/go/thrift/server.go            |   35 +
 .../thrift/lib/go/thrift/server_socket.go     |  137 +
 .../thrift/lib/go/thrift/server_transport.go  |   34 +
 .../lib/go/thrift/simple_json_protocol.go     | 1373 +++++++
 .../thrift/lib/go/thrift/simple_server.go     |  332 ++
 .../thrift/lib/go/thrift/socket.go            |  238 ++
 .../thrift/lib/go/thrift/socket_conn.go       |  102 +
 .../thrift/lib/go/thrift/socket_unix_conn.go  |   83 +
 .../lib/go/thrift/socket_windows_conn.go      |   34 +
 .../thrift/lib/go/thrift/ssl_server_socket.go |  112 +
 .../thrift/lib/go/thrift/ssl_socket.go        |  258 ++
 .../thrift/lib/go/thrift/transport.go         |   70 +
 .../lib/go/thrift/transport_exception.go      |  131 +
 .../thrift/lib/go/thrift/transport_factory.go |   39 +
 .../third_party/thrift/lib/go/thrift/type.go  |   69 +
 .../thrift/lib/go/thrift/zlib_transport.go    |  137 +
 .../otel/exporters/jaeger/jaeger.go           |  360 ++
 .../jaeger/reconnecting_udp_client.go         |  204 +
 .../otel/exporters/jaeger/uploader.go         |  339 ++
 .../otel/exporters/otlp/internal/config.go    |   34 +
 .../otlp/internal/envconfig/envconfig.go      |  199 +
 .../otel/exporters/otlp/internal/header.go    |   24 +
 .../exporters/otlp/internal/partialsuccess.go |   64 +
 .../exporters/otlp/internal/retry/LICENSE     |  201 +
 .../exporters/otlp/internal/retry/retry.go    |  153 +
 .../exporters/otlp/internal/wrappederror.go   |   61 +
 .../otel/exporters/otlp/otlptrace/LICENSE     |  201 +
 .../otel/exporters/otlp/otlptrace/README.md   |   51 +
 .../otel/exporters/otlp/otlptrace/clients.go  |   54 +
 .../otel/exporters/otlp/otlptrace/exporter.go |  118 +
 .../internal/otlpconfig/envconfig.go          |  150 +
 .../otlptrace/internal/otlpconfig/options.go  |  308 ++
 .../internal/otlpconfig/optiontypes.go        |   48 +
 .../otlp/otlptrace/internal/otlpconfig/tls.go |   34 +
 .../internal/tracetransform/attribute.go      |  158 +
 .../tracetransform/instrumentation.go         |   30 +
 .../internal/tracetransform/resource.go       |   28 +
 .../otlptrace/internal/tracetransform/span.go |  205 +
 .../otlp/otlptrace/otlptracegrpc/LICENSE      |  201 +
 .../otlp/otlptrace/otlptracegrpc/client.go    |  296 ++
 .../otlp/otlptrace/otlptracegrpc/exporter.go  |   31 +
 .../otlp/otlptrace/otlptracegrpc/options.go   |  189 +
 .../go.opentelemetry.io/otel/get_main_pkgs.sh |   41 +
 vendor/go.opentelemetry.io/otel/handler.go    |   96 +
 .../otel/internal/attribute/attribute.go      |  111 +
 .../otel/internal/baggage/baggage.go          |   43 +
 .../otel/internal/baggage/context.go          |   92 +
 .../otel/internal/global/internal_logging.go  |   63 +
 .../otel/internal/global/propagator.go        |   82 +
 .../otel/internal/global/state.go             |  115 +
 .../otel/internal/global/trace.go             |  192 +
 .../otel/internal/rawhelpers.go               |   55 +
 .../otel/internal_logging.go                  |   26 +
 .../go.opentelemetry.io/otel/metric/LICENSE   |  201 +
 .../go.opentelemetry.io/otel/metric/config.go |   69 +
 vendor/go.opentelemetry.io/otel/metric/doc.go |   23 +
 .../otel/metric/global/global.go              |   42 +
 .../otel/metric/instrument/asyncfloat64.go    |  131 +
 .../otel/metric/instrument/asyncint64.go      |  131 +
 .../otel/metric/instrument/instrument.go      |   90 +
 .../otel/metric/instrument/syncfloat64.go     |   86 +
 .../otel/metric/instrument/syncint64.go       |   86 +
 .../metric/internal/global/instruments.go     |  355 ++
 .../otel/metric/internal/global/meter.go      |  354 ++
 .../otel/metric/internal/global/state.go      |   68 +
 .../go.opentelemetry.io/otel/metric/meter.go  |  138 +
 .../go.opentelemetry.io/otel/metric/noop.go   |  198 +
 .../otel/metric/unit/doc.go                   |   20 +
 .../otel/metric/unit/unit.go                  |   25 +
 .../go.opentelemetry.io/otel/propagation.go   |   31 +
 .../otel/propagation/baggage.go               |   58 +
 .../otel/propagation/doc.go                   |   24 +
 .../otel/propagation/propagation.go           |  153 +
 .../otel/propagation/trace_context.go         |  159 +
 vendor/go.opentelemetry.io/otel/sdk/LICENSE   |  201 +
 .../otel/sdk/instrumentation/doc.go           |   24 +
 .../otel/sdk/instrumentation/library.go       |   19 +
 .../otel/sdk/instrumentation/scope.go         |   26 +
 .../otel/sdk/internal/env/env.go              |  177 +
 .../otel/sdk/internal/internal.go             |   37 +
 .../otel/sdk/resource/auto.go                 |   72 +
 .../otel/sdk/resource/builtin.go              |  108 +
 .../otel/sdk/resource/config.go               |  201 +
 .../otel/sdk/resource/container.go            |  100 +
 .../otel/sdk/resource/doc.go                  |   28 +
 .../otel/sdk/resource/env.go                  |  108 +
 .../otel/sdk/resource/os.go                   |   97 +
 .../otel/sdk/resource/os_release_darwin.go    |  102 +
 .../otel/sdk/resource/os_release_unix.go      |  154 +
 .../otel/sdk/resource/os_unix.go              |   90 +
 .../otel/sdk/resource/os_unsupported.go       |   34 +
 .../otel/sdk/resource/os_windows.go           |  101 +
 .../otel/sdk/resource/process.go              |  180 +
 .../otel/sdk/resource/resource.go             |  282 ++
 .../otel/sdk/trace/batch_span_processor.go    |  432 +++
 .../go.opentelemetry.io/otel/sdk/trace/doc.go |   21 +
 .../otel/sdk/trace/event.go                   |   37 +
 .../otel/sdk/trace/evictedqueue.go            |   44 +
 .../otel/sdk/trace/id_generator.go            |   77 +
 .../otel/sdk/trace/link.go                    |   34 +
 .../otel/sdk/trace/provider.go                |  461 +++
 .../otel/sdk/trace/sampler_env.go             |  108 +
 .../otel/sdk/trace/sampling.go                |  293 ++
 .../otel/sdk/trace/simple_span_processor.go   |  128 +
 .../otel/sdk/trace/snapshot.go                |  144 +
 .../otel/sdk/trace/span.go                    |  828 ++++
 .../otel/sdk/trace/span_exporter.go           |   47 +
 .../otel/sdk/trace/span_limits.go             |  125 +
 .../otel/sdk/trace/span_processor.go          |   72 +
 .../otel/sdk/trace/tracer.go                  |  161 +
 .../otel/semconv/internal/http.go             |  336 ++
 .../otel/semconv/internal/v2/http.go          |  404 ++
 .../otel/semconv/internal/v2/net.go           |  324 ++
 .../otel/semconv/v1.10.0/doc.go               |   20 +
 .../otel/semconv/v1.10.0/exception.go         |   20 +
 .../otel/semconv/v1.10.0/http.go              |  114 +
 .../otel/semconv/v1.10.0/resource.go          |  981 +++++
 .../otel/semconv/v1.10.0/schema.go            |   20 +
 .../otel/semconv/v1.10.0/trace.go             | 1700 +++++++++
 .../otel/semconv/v1.12.0/doc.go               |   20 +
 .../otel/semconv/v1.12.0/exception.go         |   20 +
 .../otel/semconv/v1.12.0/http.go              |  114 +
 .../otel/semconv/v1.12.0/resource.go          | 1042 +++++
 .../otel/semconv/v1.12.0/schema.go            |   20 +
 .../otel/semconv/v1.12.0/trace.go             | 1704 +++++++++
 .../otel/semconv/v1.17.0/doc.go               |   20 +
 .../otel/semconv/v1.17.0/event.go             |  199 +
 .../otel/semconv/v1.17.0/exception.go         |   20 +
 .../otel/semconv/v1.17.0/http.go              |   21 +
 .../otel/semconv/v1.17.0/httpconv/http.go     |  150 +
 .../otel/semconv/v1.17.0/resource.go          | 2010 ++++++++++
 .../otel/semconv/v1.17.0/schema.go            |   20 +
 .../otel/semconv/v1.17.0/trace.go             | 3375 +++++++++++++++++
 vendor/go.opentelemetry.io/otel/trace.go      |   47 +
 vendor/go.opentelemetry.io/otel/trace/LICENSE |  201 +
 .../go.opentelemetry.io/otel/trace/config.go  |  333 ++
 .../go.opentelemetry.io/otel/trace/context.go |   61 +
 vendor/go.opentelemetry.io/otel/trace/doc.go  |   66 +
 .../otel/trace/nonrecording.go                |   27 +
 vendor/go.opentelemetry.io/otel/trace/noop.go |   89 +
 .../go.opentelemetry.io/otel/trace/trace.go   |  551 +++
 .../otel/trace/tracestate.go                  |  212 ++
 .../otel/verify_examples.sh                   |   85 +
 vendor/go.opentelemetry.io/otel/version.go    |   20 +
 vendor/go.opentelemetry.io/otel/versions.yaml |   57 +
 vendor/go.opentelemetry.io/proto/otlp/LICENSE |  201 +
 .../collector/trace/v1/trace_service.pb.go    |  367 ++
 .../collector/trace/v1/trace_service.pb.gw.go |  167 +
 .../trace/v1/trace_service_grpc.pb.go         |  109 +
 .../proto/otlp/common/v1/common.pb.go         |  627 +++
 .../proto/otlp/resource/v1/resource.pb.go     |  193 +
 .../proto/otlp/trace/v1/trace.pb.go           | 1140 ++++++
 .../x/net/internal/timeseries/timeseries.go   |  525 +++
 vendor/golang.org/x/net/trace/events.go       |  532 +++
 vendor/golang.org/x/net/trace/histogram.go    |  365 ++
 vendor/golang.org/x/net/trace/trace.go        | 1130 ++++++
 .../golang.org/x/sys/windows/registry/key.go  |  206 +
 .../x/sys/windows/registry/mksyscall.go       |   10 +
 .../x/sys/windows/registry/syscall.go         |   33 +
 .../x/sys/windows/registry/value.go           |  387 ++
 .../sys/windows/registry/zsyscall_windows.go  |  117 +
 vendor/google.golang.org/genproto/LICENSE     |  202 +
 .../googleapis/api/httpbody/httpbody.pb.go    |  235 ++
 .../rpc/errdetails/error_details.pb.go        | 1314 +++++++
 .../googleapis/rpc/status/status.pb.go        |  203 +
 .../protobuf/field_mask/field_mask.go         |   23 +
 vendor/google.golang.org/grpc/AUTHORS         |    1 +
 .../google.golang.org/grpc/CODE-OF-CONDUCT.md |    3 +
 vendor/google.golang.org/grpc/CONTRIBUTING.md |   60 +
 vendor/google.golang.org/grpc/GOVERNANCE.md   |    1 +
 vendor/google.golang.org/grpc/LICENSE         |  202 +
 vendor/google.golang.org/grpc/MAINTAINERS.md  |   28 +
 vendor/google.golang.org/grpc/Makefile        |   46 +
 vendor/google.golang.org/grpc/NOTICE.txt      |   13 +
 vendor/google.golang.org/grpc/README.md       |  141 +
 vendor/google.golang.org/grpc/SECURITY.md     |    3 +
 .../grpc/attributes/attributes.go             |  101 +
 vendor/google.golang.org/grpc/backoff.go      |   61 +
 .../google.golang.org/grpc/backoff/backoff.go |   52 +
 .../grpc/balancer/balancer.go                 |  404 ++
 .../grpc/balancer/base/balancer.go            |  254 ++
 .../grpc/balancer/base/base.go                |   71 +
 .../grpc/balancer/conn_state_evaluator.go     |   74 +
 .../grpc/balancer/grpclb/state/state.go       |   51 +
 .../grpc/balancer/roundrobin/roundrobin.go    |   81 +
 .../grpc/balancer_conn_wrappers.go            |  481 +++
 .../grpc_binarylog_v1/binarylog.pb.go         | 1183 ++++++
 vendor/google.golang.org/grpc/call.go         |   74 +
 .../grpc/channelz/channelz.go                 |   36 +
 vendor/google.golang.org/grpc/clientconn.go   | 1647 ++++++++
 vendor/google.golang.org/grpc/codec.go        |   50 +
 vendor/google.golang.org/grpc/codegen.sh      |   17 +
 .../grpc/codes/code_string.go                 |   62 +
 vendor/google.golang.org/grpc/codes/codes.go  |  244 ++
 .../grpc/connectivity/connectivity.go         |   94 +
 .../grpc/credentials/credentials.go           |  291 ++
 .../grpc/credentials/insecure/insecure.go     |   98 +
 .../google.golang.org/grpc/credentials/tls.go |  236 ++
 vendor/google.golang.org/grpc/dialoptions.go  |  637 ++++
 vendor/google.golang.org/grpc/doc.go          |   26 +
 .../grpc/encoding/encoding.go                 |  135 +
 .../grpc/encoding/gzip/gzip.go                |  132 +
 .../grpc/encoding/proto/proto.go              |   58 +
 .../grpc/grpclog/component.go                 |  117 +
 .../google.golang.org/grpc/grpclog/grpclog.go |  132 +
 .../google.golang.org/grpc/grpclog/logger.go  |   87 +
 .../grpc/grpclog/loggerv2.go                  |  258 ++
 vendor/google.golang.org/grpc/interceptor.go  |  104 +
 .../grpc/internal/backoff/backoff.go          |   73 +
 .../balancer/gracefulswitch/gracefulswitch.go |  384 ++
 .../grpc/internal/balancerload/load.go        |   46 +
 .../grpc/internal/binarylog/binarylog.go      |  189 +
 .../internal/binarylog/binarylog_testutil.go  |   42 +
 .../grpc/internal/binarylog/env_config.go     |  208 +
 .../grpc/internal/binarylog/method_logger.go  |  435 +++
 .../grpc/internal/binarylog/sink.go           |  170 +
 .../grpc/internal/buffer/unbounded.go         |   85 +
 .../grpc/internal/channelz/funcs.go           |  789 ++++
 .../grpc/internal/channelz/id.go              |   75 +
 .../grpc/internal/channelz/logging.go         |   79 +
 .../grpc/internal/channelz/types.go           |  722 ++++
 .../grpc/internal/channelz/types_linux.go     |   51 +
 .../grpc/internal/channelz/types_nonlinux.go  |   43 +
 .../grpc/internal/channelz/util_linux.go      |   37 +
 .../grpc/internal/channelz/util_nonlinux.go   |   27 +
 .../grpc/internal/credentials/credentials.go  |   49 +
 .../grpc/internal/credentials/spiffe.go       |   75 +
 .../grpc/internal/credentials/syscallconn.go  |   58 +
 .../grpc/internal/credentials/util.go         |   52 +
 .../grpc/internal/envconfig/envconfig.go      |   62 +
 .../grpc/internal/envconfig/observability.go  |   36 +
 .../grpc/internal/envconfig/xds.go            |   92 +
 .../grpc/internal/grpclog/grpclog.go          |  126 +
 .../grpc/internal/grpclog/prefixLogger.go     |   81 +
 .../grpc/internal/grpcrand/grpcrand.go        |   74 +
 .../grpc/internal/grpcsync/event.go           |   61 +
 .../grpc/internal/grpcsync/oncefunc.go        |   32 +
 .../grpc/internal/grpcutil/compressor.go      |   47 +
 .../grpc/internal/grpcutil/encode_duration.go |   63 +
 .../grpc/internal/grpcutil/grpcutil.go        |   20 +
 .../grpc/internal/grpcutil/metadata.go        |   40 +
 .../grpc/internal/grpcutil/method.go          |   88 +
 .../grpc/internal/grpcutil/regex.go           |   31 +
 .../grpc/internal/internal.go                 |  160 +
 .../grpc/internal/metadata/metadata.go        |  120 +
 .../grpc/internal/pretty/pretty.go            |   82 +
 .../grpc/internal/resolver/config_selector.go |  167 +
 .../internal/resolver/dns/dns_resolver.go     |  458 +++
 .../resolver/passthrough/passthrough.go       |   64 +
 .../grpc/internal/resolver/unix/unix.go       |   74 +
 .../internal/serviceconfig/serviceconfig.go   |  180 +
 .../grpc/internal/status/status.go            |  176 +
 .../grpc/internal/syscall/syscall_linux.go    |  112 +
 .../grpc/internal/syscall/syscall_nonlinux.go |   77 +
 .../grpc/internal/transport/bdp_estimator.go  |  141 +
 .../grpc/internal/transport/controlbuf.go     |  998 +++++
 .../grpc/internal/transport/defaults.go       |   55 +
 .../grpc/internal/transport/flowcontrol.go    |  215 ++
 .../grpc/internal/transport/handler_server.go |  477 +++
 .../grpc/internal/transport/http2_client.go   | 1800 +++++++++
 .../grpc/internal/transport/http2_server.go   | 1461 +++++++
 .../grpc/internal/transport/http_util.go      |  412 ++
 .../transport/networktype/networktype.go      |   46 +
 .../grpc/internal/transport/proxy.go          |  142 +
 .../grpc/internal/transport/transport.go      |  823 ++++
 .../grpc/internal/xds_handshake_cluster.go    |   40 +
 .../grpc/keepalive/keepalive.go               |   85 +
 .../grpc/metadata/metadata.go                 |  288 ++
 vendor/google.golang.org/grpc/peer/peer.go    |   51 +
 .../google.golang.org/grpc/picker_wrapper.go  |  194 +
 vendor/google.golang.org/grpc/pickfirst.go    |  183 +
 vendor/google.golang.org/grpc/preloader.go    |   67 +
 vendor/google.golang.org/grpc/regenerate.sh   |  123 +
 vendor/google.golang.org/grpc/resolver/map.go |  138 +
 .../grpc/resolver/resolver.go                 |  308 ++
 .../grpc/resolver_conn_wrapper.go             |  176 +
 vendor/google.golang.org/grpc/rpc_util.go     |  916 +++++
 vendor/google.golang.org/grpc/server.go       | 1971 ++++++++++
 .../google.golang.org/grpc/service_config.go  |  406 ++
 .../grpc/serviceconfig/serviceconfig.go       |   44 +
 .../google.golang.org/grpc/stats/handlers.go  |   63 +
 vendor/google.golang.org/grpc/stats/stats.go  |  319 ++
 .../google.golang.org/grpc/status/status.go   |  135 +
 vendor/google.golang.org/grpc/stream.go       | 1740 +++++++++
 vendor/google.golang.org/grpc/tap/tap.go      |   56 +
 vendor/google.golang.org/grpc/trace.go        |  123 +
 vendor/google.golang.org/grpc/version.go      |   22 +
 vendor/google.golang.org/grpc/vet.sh          |  217 ++
 .../protobuf/encoding/protojson/decode.go     |  665 ++++
 .../protobuf/encoding/protojson/doc.go        |   11 +
 .../protobuf/encoding/protojson/encode.go     |  343 ++
 .../encoding/protojson/well_known_types.go    |  889 +++++
 .../protobuf/internal/encoding/json/decode.go |  340 ++
 .../internal/encoding/json/decode_number.go   |  254 ++
 .../internal/encoding/json/decode_string.go   |   91 +
 .../internal/encoding/json/decode_token.go    |  192 +
 .../protobuf/internal/encoding/json/encode.go |  276 ++
 .../protobuf/types/known/anypb/any.pb.go      |  498 +++
 .../types/known/durationpb/duration.pb.go     |  379 ++
 .../types/known/fieldmaskpb/field_mask.pb.go  |  591 +++
 .../types/known/timestamppb/timestamp.pb.go   |  390 ++
 .../types/known/wrapperspb/wrappers.pb.go     |  760 ++++
 vendor/modules.txt                            |  156 +
 472 files changed, 102600 insertions(+), 12 deletions(-)
 create mode 100644 internal/tracing/no_trace.go
 create mode 100644 internal/tracing/tracing.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/.gitignore
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/LICENSE
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/README.md
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/backoff.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/ticker.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/timer.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go
 create mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml
 create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md
 create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md
 create mode 100644 vendor/github.com/go-logr/logr/LICENSE
 create mode 100644 vendor/github.com/go-logr/logr/README.md
 create mode 100644 vendor/github.com/go-logr/logr/discard.go
 create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go
 create mode 100644 vendor/github.com/go-logr/logr/logr.go
 create mode 100644 vendor/github.com/go-logr/stdr/LICENSE
 create mode 100644 vendor/github.com/go-logr/stdr/README.md
 create mode 100644 vendor/github.com/go-logr/stdr/stdr.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
 create mode 100644 vendor/github.com/uptrace/bun/extra/bunotel/LICENSE
 create mode 100644 vendor/github.com/uptrace/bun/extra/bunotel/README.md
 create mode 100644 vendor/github.com/uptrace/bun/extra/bunotel/option.go
 create mode 100644 vendor/github.com/uptrace/bun/extra/bunotel/otel.go
 create mode 100644 vendor/github.com/uptrace/bun/extra/bunotel/safe.go
 create mode 100644 vendor/github.com/uptrace/bun/extra/bunotel/unsafe.go
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/.golangci.yml
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/LICENSE
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/README.md
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/driver.go
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/otel.go
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/stmt.go
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/tx.go
 create mode 100644 vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/version.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/doc.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/option.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes
 create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore
 create mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules
 create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml
 create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore
 create mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml
 create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md
 create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS
 create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
 create mode 100644 vendor/go.opentelemetry.io/otel/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/Makefile
 create mode 100644 vendor/go.opentelemetry.io/otel/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md
 create mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/encoder.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/iterator.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/key.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/kv.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/set.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/type_string.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/value.go
 create mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/codes/codes.go
 create mode 100644 vendor/go.opentelemetry.io/otel/codes/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
 create mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
 create mode 100644 vendor/go.opentelemetry.io/otel/handler.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/global/global.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/unit/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/unit/unit.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/config.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/container.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/process.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/event.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/link.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/config.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/nonrecording.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracestate.go
 create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh
 create mode 100644 vendor/go.opentelemetry.io/otel/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
 create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go
 create mode 100644 vendor/golang.org/x/net/trace/events.go
 create mode 100644 vendor/golang.org/x/net/trace/histogram.go
 create mode 100644 vendor/golang.org/x/net/trace/trace.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
 create mode 100644 vendor/google.golang.org/genproto/LICENSE
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
 create mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
 create mode 100644 vendor/google.golang.org/grpc/AUTHORS
 create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
 create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md
 create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md
 create mode 100644 vendor/google.golang.org/grpc/LICENSE
 create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md
 create mode 100644 vendor/google.golang.org/grpc/Makefile
 create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt
 create mode 100644 vendor/google.golang.org/grpc/README.md
 create mode 100644 vendor/google.golang.org/grpc/SECURITY.md
 create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go
 create mode 100644 vendor/google.golang.org/grpc/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
 create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go
 create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
 create mode 100644 vendor/google.golang.org/grpc/call.go
 create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go
 create mode 100644 vendor/google.golang.org/grpc/clientconn.go
 create mode 100644 vendor/google.golang.org/grpc/codec.go
 create mode 100644 vendor/google.golang.org/grpc/codegen.sh
 create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go
 create mode 100644 vendor/google.golang.org/grpc/codes/codes.go
 create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go
 create mode 100644 vendor/google.golang.org/grpc/dialoptions.go
 create mode 100644 vendor/google.golang.org/grpc/doc.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/gzip/gzip.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go
 create mode 100644 vendor/google.golang.org/grpc/interceptor.go
 create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
 create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go
 create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go
 create mode 100644 vendor/google.golang.org/grpc/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
 create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
 create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go
 create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/proxy.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go
 create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
 create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go
 create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/peer/peer.go
 create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/pickfirst.go
 create mode 100644 vendor/google.golang.org/grpc/preloader.go
 create mode 100644 vendor/google.golang.org/grpc/regenerate.sh
 create mode 100644 vendor/google.golang.org/grpc/resolver/map.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go
 create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/rpc_util.go
 create mode 100644 vendor/google.golang.org/grpc/server.go
 create mode 100644 vendor/google.golang.org/grpc/service_config.go
 create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
 create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go
 create mode 100644 vendor/google.golang.org/grpc/stats/stats.go
 create mode 100644 vendor/google.golang.org/grpc/status/status.go
 create mode 100644 vendor/google.golang.org/grpc/stream.go
 create mode 100644 vendor/google.golang.org/grpc/tap/tap.go
 create mode 100644 vendor/google.golang.org/grpc/trace.go
 create mode 100644 vendor/google.golang.org/grpc/version.go
 create mode 100644 vendor/google.golang.org/grpc/vet.sh
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go

diff --git a/cmd/gotosocial/action/server/server.go b/cmd/gotosocial/action/server/server.go
index b4329fec7..b33688800 100644
--- a/cmd/gotosocial/action/server/server.go
+++ b/cmd/gotosocial/action/server/server.go
@@ -32,6 +32,7 @@ import (
 	apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
 	"github.com/superseriousbusiness/gotosocial/internal/middleware"
+	"github.com/superseriousbusiness/gotosocial/internal/tracing"
 	"go.uber.org/automaxprocs/maxprocs"
 
 	"github.com/superseriousbusiness/gotosocial/internal/config"
@@ -70,6 +71,12 @@ var Start action.GTSAction = func(ctx context.Context) error {
 	state.Caches.Start()
 	defer state.Caches.Stop()
 
+	// Initialize tracing
+
+	if err := tracing.Initialize(); err != nil {
+		return fmt.Errorf("error initializing tracing: %w", err)
+	}
+
 	// Open connection to the database
 	dbService, err := bundb.NewBunDBService(ctx, &state)
 	if err != nil {
@@ -146,16 +153,23 @@ var Start action.GTSAction = func(ctx context.Context) error {
 		return fmt.Errorf("error creating router: %s", err)
 	}
 
-	// attach global middlewares which are used for every request
-	router.AttachGlobalMiddleware(
-		middleware.AddRequestID(config.GetRequestIDHeader()),
+	middlewares := []gin.HandlerFunc{
+		middleware.AddRequestID(config.GetRequestIDHeader()), // requestID middleware must run before tracing
+	}
+	if config.GetTracingEnabled() {
+		middlewares = append(middlewares, tracing.InstrumentGin())
+	}
+	middlewares = append(middlewares, []gin.HandlerFunc{
 		// note: hooks adding ctx fields must be ABOVE
 		// the logger, otherwise won't be accessible.
 		middleware.Logger(),
 		middleware.UserAgent(),
 		middleware.CORS(),
 		middleware.ExtraHeaders(),
-	)
+	}...)
+
+	// attach global middlewares which are used for every request
+	router.AttachGlobalMiddleware(middlewares...)
 
 	// attach global no route / 404 handler to the router
 	router.AttachNoRouteHandler(func(c *gin.Context) {
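
The server hunk above replaces the fixed AttachGlobalMiddleware call with a conditionally-built slice: the tracing middleware is only registered when tracing is enabled, and it is always placed after AddRequestID so the span can pick up the request ID. A minimal standalone sketch of that assembly pattern follows — the stub handlers are illustrative placeholders, not the real GtS middlewares:

    package main

    import "github.com/gin-gonic/gin"

    // Illustrative stand-ins for the real middlewares.
    func addRequestID() gin.HandlerFunc  { return func(c *gin.Context) { c.Next() } }
    func instrumentGin() gin.HandlerFunc { return func(c *gin.Context) { c.Next() } }
    func logger() gin.HandlerFunc        { return func(c *gin.Context) { c.Next() } }

    func buildGlobalMiddleware(tracingEnabled bool) []gin.HandlerFunc {
        // Request-ID middleware must run first; tracing (and the
        // logger) read the ID out of the request context.
        mw := []gin.HandlerFunc{addRequestID()}
        if tracingEnabled {
            mw = append(mw, instrumentGin())
        }
        return append(mw, logger())
    }

    func main() {
        r := gin.New()
        r.Use(buildGlobalMiddleware(true)...)
        _ = r.Run() // listens on :8080 by default
    }
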
diff --git a/cmd/gotosocial/action/testrig/testrig.go b/cmd/gotosocial/action/testrig/testrig.go
index ba944a148..b5d34e7c9 100644
--- a/cmd/gotosocial/action/testrig/testrig.go
+++ b/cmd/gotosocial/action/testrig/testrig.go
@@ -40,6 +40,7 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/oidc"
 	"github.com/superseriousbusiness/gotosocial/internal/state"
 	"github.com/superseriousbusiness/gotosocial/internal/storage"
+	"github.com/superseriousbusiness/gotosocial/internal/tracing"
 	"github.com/superseriousbusiness/gotosocial/internal/web"
 	"github.com/superseriousbusiness/gotosocial/testrig"
 )
@@ -51,6 +52,10 @@ var Start action.GTSAction = func(ctx context.Context) error {
 	testrig.InitTestConfig()
 	testrig.InitTestLog()
 
+	if err := tracing.Initialize(); err != nil {
+		return fmt.Errorf("error initializing tracing: %w", err)
+	}
+
 	// Initialize caches
 	state.Caches.Init()
 	state.Caches.Start()
@@ -93,14 +98,21 @@ var Start action.GTSAction = func(ctx context.Context) error {
 	*/
 
 	router := testrig.NewTestRouter(state.DB)
-
-	// attach global middlewares which are used for every request
-	router.AttachGlobalMiddleware(
+	middlewares := []gin.HandlerFunc{
+		middleware.AddRequestID(config.GetRequestIDHeader()), // requestID middleware must run before tracing
+	}
+	if config.GetTracingEnabled() {
+		middlewares = append(middlewares, tracing.InstrumentGin())
+	}
+	middlewares = append(middlewares, []gin.HandlerFunc{
 		middleware.Logger(),
 		middleware.UserAgent(),
 		middleware.CORS(),
 		middleware.ExtraHeaders(),
-	)
+	}...)
+
+	// attach global middlewares which are used for every request
+	router.AttachGlobalMiddleware(middlewares...)
 
 	// attach global no route / 404 handler to the router
 	router.AttachNoRouteHandler(func(c *gin.Context) {
diff --git a/docs/configuration/observability.md b/docs/configuration/observability.md
index 0d5f9e864..ddd423be9 100644
--- a/docs/configuration/observability.md
+++ b/docs/configuration/observability.md
@@ -9,8 +9,33 @@ These settings let you tune and configure certain observability related behaviou
 ##### OBSERVABILITY SETTINGS #####
 ##################################
 
+# Bool. Enable generation/parsing of a request ID for each received HTTP request.
+# Default: true
+request-id-enabled: true
+
 # String. Header name to use to extract a request or trace ID from. Typically set by a
 # loadbalancer or proxy.
 # Default: "X-Request-Id"
 request-id-header: "X-Request-Id"
+
+# Bool. Enable OpenTelemetry based tracing support.
+# Default: false
+tracing-enabled: false
+
+# String. Set the transport protocol for the tracing system. Can either be "grpc" for
+# OTLP gRPC or "jaeger" for Jaeger-based ingesters.
+# Options: ["grpc", "jaeger"]
+# Default: "grpc"
+tracing-transport: "grpc"
+
+# String. Endpoint of the trace ingester. When using the gRPC based transport, the
+# endpoint is usually a single address/port combination. For the jaeger transport it
+# should be a fully qualified URL.
+# Examples: ["localhost:4317", "http://localhost:14268/api/traces"]
+# Default: ""
+tracing-endpoint: ""
+
+# Bool. Disable HTTPS for the gRPC transport protocol.
+# Default: false
+tracing-insecure-transport: false
 ```
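
As with the other observability settings, each of these keys can also be supplied via a GTS_-prefixed environment variable (GTS_TRACING_ENDPOINT appears in test/envparsing.sh below; the remaining variables are assumed to follow the same naming convention), so a typical OTLP gRPC setup might look like:

    GTS_TRACING_ENABLED=true \
    GTS_TRACING_TRANSPORT='grpc' \
    GTS_TRACING_ENDPOINT='localhost:4317' \
    gotosocial server start
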
diff --git a/go.mod b/go.mod
index 0873d6565..0314652ea 100644
--- a/go.mod
+++ b/go.mod
@@ -49,8 +49,15 @@ require (
 	github.com/uptrace/bun v1.1.12
 	github.com/uptrace/bun/dialect/pgdialect v1.1.12
 	github.com/uptrace/bun/dialect/sqlitedialect v1.1.12
+	github.com/uptrace/bun/extra/bunotel v1.1.12
 	github.com/wagslane/go-password-validator v0.3.0
 	github.com/yuin/goldmark v1.5.4
+	go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0
+	go.opentelemetry.io/otel v1.14.0
+	go.opentelemetry.io/otel/exporters/jaeger v1.14.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0
+	go.opentelemetry.io/otel/sdk v1.14.0
+	go.opentelemetry.io/otel/trace v1.14.0
 	go.uber.org/automaxprocs v1.5.2
 	golang.org/x/crypto v0.8.0
 	golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
@@ -77,6 +84,7 @@ require (
 	codeberg.org/gruf/go-pools v1.1.0 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/bytedance/sonic v1.8.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
 	github.com/cilium/ebpf v0.9.1 // indirect
 	github.com/containerd/cgroups/v3 v3.0.1 // indirect
@@ -95,6 +103,8 @@ require (
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/go-errors/errors v1.4.1 // indirect
 	github.com/go-jose/go-jose/v3 v3.0.0 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 // indirect
@@ -107,6 +117,7 @@ require (
 	github.com/gorilla/css v1.0.0 // indirect
 	github.com/gorilla/securecookie v1.1.1 // indirect
 	github.com/gorilla/sessions v1.2.1 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
@@ -145,13 +156,20 @@ require (
 	github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 	github.com/ugorji/go/codec v1.2.9 // indirect
+	github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
+	go.opentelemetry.io/otel/metric v0.36.0 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
 	golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect
 	golang.org/x/mod v0.10.0 // indirect
 	golang.org/x/sys v0.7.0 // indirect
 	golang.org/x/tools v0.6.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+	google.golang.org/grpc v1.53.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 57af32f19..35d47ea70 100644
--- a/go.sum
+++ b/go.sum
@@ -92,12 +92,14 @@ github.com/KimMachineGun/automemlimit v0.2.6 h1:tQFriVTcIteUkV5EgU9iz03eDY36T8JU
 github.com/KimMachineGun/automemlimit v0.2.6/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0=
 github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/abema/go-mp4 v0.10.1 h1:wOhZgNxjduc8r4FJdwPa5x/gdBSSX+8MTnfNj/xkJaE=
 github.com/abema/go-mp4 v0.10.1/go.mod h1:vPl9t5ZK7K0x68jh12/+ECWBCXoWuIDtNgPtU2f04ws=
 github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
 github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
 github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
 github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
@@ -105,7 +107,11 @@ github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3b
 github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
 github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA=
 github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
 github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
 github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
@@ -119,6 +125,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
@@ -168,6 +179,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
@@ -180,6 +193,7 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
 github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg=
 github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
 github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
 github.com/gin-contrib/cors v1.4.0/go.mod h1:bs9pNM0x/UsmHPBWT2xZz9ROh8xYjYkiURUfmBoMlcs=
 github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
@@ -205,6 +219,11 @@ github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyM
 github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/form/v4 v4.2.0 h1:N1wh+Goz61e6w66vo8vJkQt+uwZSoLz50kZPJWR8eic=
@@ -238,6 +257,8 @@ github.com/golang/geo v0.0.0-20200319012246-673a6f80352d/go.mod h1:QZ0nwyI2jOfgR
 github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
 github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -278,6 +299,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
@@ -320,6 +342,9 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
 github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -485,6 +510,7 @@ github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTep
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
@@ -510,6 +536,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
 github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
 github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
@@ -526,6 +553,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -589,6 +617,10 @@ github.com/uptrace/bun/dialect/pgdialect v1.1.12 h1:m/CM1UfOkoBTglGO5CUTKnIKKOAp
 github.com/uptrace/bun/dialect/pgdialect v1.1.12/go.mod h1:Ij6WIxQILxLlL2frUBxUBOZJtLElD2QQNDcu/PWDHTc=
 github.com/uptrace/bun/dialect/sqlitedialect v1.1.12 h1:Ud31nqZmebcQpl151nb108+vtcpxJ7kfXmbPYbALBiI=
 github.com/uptrace/bun/dialect/sqlitedialect v1.1.12/go.mod h1:Pwg7s31BdF3PMBlWTnYkEn2I9ASsvatt1Ln/AERCTV4=
+github.com/uptrace/bun/extra/bunotel v1.1.12 h1:uWPU75j9dYGXMRC9jF0ASlndZZAcngoqZagH4w3kn54=
+github.com/uptrace/bun/extra/bunotel v1.1.12/go.mod h1:QfszJGLzNaTTGvvg17cEEUyEwxXq2NJ7sRvrPYvYSIU=
+github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21 h1:iHkIlTU2P3xbSbVJbAiHL9IT+ekYV5empheF+652yeQ=
+github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21/go.mod h1:hiCFa1UeZITKXi8lhu2qwOD5LHXjdGMCUIQHbybxoF0=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.14.0/go.mod h1:ol1PCaL0dX20wC0htZ7sYCsvCYmrouYra0zHzaclZhE=
@@ -631,13 +663,35 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0 h1:E4MMXDxufRnIHXhoTNOlNsdkWpC5HdLhfj84WNRKPkc=
+go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0/go.mod h1:A8+gHkpqTfMKxdKWq1pp360nAs096K26CH5Sm2YHDdA=
+go.opentelemetry.io/contrib/propagators/b3 v1.15.0 h1:bMaonPyFcAvZ4EVzkUNkfnUHP5Zi63CIDlA3dRsEg8Q=
+go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
+go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
+go.opentelemetry.io/otel/exporters/jaeger v1.14.0 h1:CjbUNd4iN2hHmWekmOqZ+zSCU+dzZppG8XsV+A3oc8Q=
+go.opentelemetry.io/otel/exporters/jaeger v1.14.0/go.mod h1:4Ay9kk5vELRrbg5z4cpP9EtmQRFap2Wb0woPG4lujZA=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
+go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q=
+go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk=
+go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
+go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
+go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
 go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@@ -741,6 +795,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
 golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
@@ -757,6 +812,7 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
 golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
 golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
@@ -818,7 +874,9 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -845,6 +903,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
@@ -966,6 +1025,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
 google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
@@ -979,6 +1039,9 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -992,9 +1055,15 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
 google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1007,6 +1076,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
@@ -1026,6 +1096,7 @@ gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg
 gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/internal/config/config.go b/internal/config/config.go
index a1570bbaf..b5c228a35 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -126,6 +126,11 @@ type Configuration struct {
 	OIDCLinkExisting     bool     `name:"oidc-link-existing" usage:"link existing user accounts to OIDC logins based on the stored email value"`
 	OIDCAdminGroups      []string `name:"oidc-admin-groups" usage:"Membership of one of the listed groups makes someone a GtS admin"`
 
+	TracingEnabled           bool   `name:"tracing-enabled" usage:"Enable OTLP Tracing"`
+	TracingTransport         string `name:"tracing-transport" usage:"Transport to use for the tracing system, either 'grpc' or 'jaeger'"`
+	TracingEndpoint          string `name:"tracing-endpoint" usage:"Endpoint of your trace collector. Eg., 'localhost:4317' for gRPC, 'http://localhost:14268/api/traces' for jaeger"`
+	TracingInsecureTransport bool   `name:"tracing-insecure" usage:"Disable HTTPS for the gRPC transport protocol"`
+
 	SMTPHost               string `name:"smtp-host" usage:"Host of the smtp server. Eg., 'smtp.eu.mailgun.org'"`
 	SMTPPort               int    `name:"smtp-port" usage:"Port of the smtp server. Eg., 587"`
 	SMTPUsername           string `name:"smtp-username" usage:"Username to authenticate with the smtp server as. Eg., 'postmaster@mail.example.org'"`
@@ -153,7 +158,7 @@ type Configuration struct {
 	AdminTransPath        string `name:"path" usage:"the path of the file to import from/export to"`
 	AdminMediaPruneDryRun bool   `name:"dry-run" usage:"perform a dry run and only log number of items eligible for pruning"`
 
-	RequestIDHeader string `name:"request-id-header" usage:"Header to extract the Request ID from. Eg.,'X-Request-Id'"`
+	RequestIDHeader string `name:"request-id-header" usage:"Header to extract the Request ID from. Eg., 'X-Request-Id'."`
 }
 
 type CacheConfiguration struct {
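
The name and usage struct tags on the new Tracing* fields are what drive the flag names and generated helpers further down (helpers.gen.go). A hypothetical sketch of reading such tags via reflection, purely to illustrate the mechanism — this is not the actual GtS generator:

    package main

    import (
        "fmt"
        "reflect"
    )

    type cfg struct {
        TracingEnabled  bool   `name:"tracing-enabled" usage:"Enable OTLP Tracing"`
        TracingEndpoint string `name:"tracing-endpoint" usage:"Endpoint of your trace collector"`
    }

    func main() {
        t := reflect.TypeOf(cfg{})
        for i := 0; i < t.NumField(); i++ {
            f := t.Field(i)
            // Each tagged field yields a CLI flag name plus its help text.
            fmt.Printf("--%s: %s\n", f.Tag.Get("name"), f.Tag.Get("usage"))
        }
    }
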
diff --git a/internal/config/defaults.go b/internal/config/defaults.go
index 1dc446e4c..54672c159 100644
--- a/internal/config/defaults.go
+++ b/internal/config/defaults.go
@@ -109,6 +109,11 @@ var Defaults = Configuration{
 	SMTPFrom:               "GoToSocial",
 	SMTPDiscloseRecipients: false,
 
+	TracingEnabled:           false,
+	TracingTransport:         "grpc",
+	TracingEndpoint:          "",
+	TracingInsecureTransport: false,
+
 	SyslogEnabled:  false,
 	SyslogProtocol: "udp",
 	SyslogAddress:  "localhost:514",
diff --git a/internal/config/helpers.gen.go b/internal/config/helpers.gen.go
index 236d1ea36..b635f7b8e 100644
--- a/internal/config/helpers.gen.go
+++ b/internal/config/helpers.gen.go
@@ -1799,6 +1799,106 @@ func GetOIDCAdminGroups() []string { return global.GetOIDCAdminGroups() }
 // SetOIDCAdminGroups safely sets the value for global configuration 'OIDCAdminGroups' field
 func SetOIDCAdminGroups(v []string) { global.SetOIDCAdminGroups(v) }
 
+// GetTracingEnabled safely fetches the Configuration value for state's 'TracingEnabled' field
+func (st *ConfigState) GetTracingEnabled() (v bool) {
+	st.mutex.Lock()
+	v = st.config.TracingEnabled
+	st.mutex.Unlock()
+	return
+}
+
+// SetTracingEnabled safely sets the Configuration value for state's 'TracingEnabled' field
+func (st *ConfigState) SetTracingEnabled(v bool) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.TracingEnabled = v
+	st.reloadToViper()
+}
+
+// TracingEnabledFlag returns the flag name for the 'TracingEnabled' field
+func TracingEnabledFlag() string { return "tracing-enabled" }
+
+// GetTracingEnabled safely fetches the value for global configuration 'TracingEnabled' field
+func GetTracingEnabled() bool { return global.GetTracingEnabled() }
+
+// SetTracingEnabled safely sets the value for global configuration 'TracingEnabled' field
+func SetTracingEnabled(v bool) { global.SetTracingEnabled(v) }
+
+// GetTracingTransport safely fetches the Configuration value for state's 'TracingTransport' field
+func (st *ConfigState) GetTracingTransport() (v string) {
+	st.mutex.Lock()
+	v = st.config.TracingTransport
+	st.mutex.Unlock()
+	return
+}
+
+// SetTracingTransport safely sets the Configuration value for state's 'TracingTransport' field
+func (st *ConfigState) SetTracingTransport(v string) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.TracingTransport = v
+	st.reloadToViper()
+}
+
+// TracingTransportFlag returns the flag name for the 'TracingTransport' field
+func TracingTransportFlag() string { return "tracing-transport" }
+
+// GetTracingTransport safely fetches the value for global configuration 'TracingTransport' field
+func GetTracingTransport() string { return global.GetTracingTransport() }
+
+// SetTracingTransport safely sets the value for global configuration 'TracingTransport' field
+func SetTracingTransport(v string) { global.SetTracingTransport(v) }
+
+// GetTracingEndpoint safely fetches the Configuration value for state's 'TracingEndpoint' field
+func (st *ConfigState) GetTracingEndpoint() (v string) {
+	st.mutex.Lock()
+	v = st.config.TracingEndpoint
+	st.mutex.Unlock()
+	return
+}
+
+// SetTracingEndpoint safely sets the Configuration value for state's 'TracingEndpoint' field
+func (st *ConfigState) SetTracingEndpoint(v string) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.TracingEndpoint = v
+	st.reloadToViper()
+}
+
+// TracingEndpointFlag returns the flag name for the 'TracingEndpoint' field
+func TracingEndpointFlag() string { return "tracing-endpoint" }
+
+// GetTracingEndpoint safely fetches the value for global configuration 'TracingEndpoint' field
+func GetTracingEndpoint() string { return global.GetTracingEndpoint() }
+
+// SetTracingEndpoint safely sets the value for global configuration 'TracingEndpoint' field
+func SetTracingEndpoint(v string) { global.SetTracingEndpoint(v) }
+
+// GetTracingInsecureTransport safely fetches the Configuration value for state's 'TracingInsecureTransport' field
+func (st *ConfigState) GetTracingInsecureTransport() (v bool) {
+	st.mutex.Lock()
+	v = st.config.TracingInsecureTransport
+	st.mutex.Unlock()
+	return
+}
+
+// SetTracingInsecureTransport safely sets the Configuration value for state's 'TracingInsecureTransport' field
+func (st *ConfigState) SetTracingInsecureTransport(v bool) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.TracingInsecureTransport = v
+	st.reloadToViper()
+}
+
+// TracingInsecureTransportFlag returns the flag name for the 'TracingInsecureTransport' field
+func TracingInsecureTransportFlag() string { return "tracing-insecure" }
+
+// GetTracingInsecureTransport safely fetches the value for global configuration 'TracingInsecureTransport' field
+func GetTracingInsecureTransport() bool { return global.GetTracingInsecureTransport() }
+
+// SetTracingInsecureTransport safely sets the value for global configuration 'TracingInsecureTransport' field
+func SetTracingInsecureTransport(v bool) { global.SetTracingInsecureTransport(v) }
+
 // GetSMTPHost safely fetches the Configuration value for state's 'SMTPHost' field
 func (st *ConfigState) GetSMTPHost() (v string) {
 	st.mutex.Lock()
diff --git a/internal/db/bundb/bundb.go b/internal/db/bundb/bundb.go
index 71433dbc2..ecf8752ea 100644
--- a/internal/db/bundb/bundb.go
+++ b/internal/db/bundb/bundb.go
@@ -42,6 +42,7 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/id"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/state"
+	"github.com/superseriousbusiness/gotosocial/internal/tracing"
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/dialect/pgdialect"
 	"github.com/uptrace/bun/dialect/sqlitedialect"
@@ -134,6 +135,10 @@ func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
 	// Add database query hook
 	conn.DB.AddQueryHook(queryHook{})
 
+	if config.GetTracingEnabled() {
+		conn.DB.AddQueryHook(tracing.InstrumentBun())
+	}
+
 	// execute sqlite pragmas *after* adding database hook;
 	// this allows the pragma queries to be logged
 	if t == "sqlite" {
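
AddQueryHook accepts anything satisfying bun's QueryHook interface, which is what InstrumentBun() returns via bunotel. A hypothetical minimal hook, shown only to illustrate the interface shape — a real tracing hook would start a span in BeforeQuery and end it in AfterQuery:

    package example

    import (
        "context"
        "log"
        "time"

        "github.com/uptrace/bun"
    )

    type loggingHook struct{}

    // BeforeQuery runs before each query; a tracing hook would start a
    // span here and return it inside ctx.
    func (loggingHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context {
        return ctx
    }

    // AfterQuery runs once the query completes; a tracing hook would
    // end the span and record the outcome here.
    func (loggingHook) AfterQuery(_ context.Context, e *bun.QueryEvent) {
        log.Printf("query took %s: %s", time.Since(e.StartTime), e.Query)
    }
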
diff --git a/internal/middleware/requestid.go b/internal/middleware/requestid.go
index 6e2a83c68..3bf38092f 100644
--- a/internal/middleware/requestid.go
+++ b/internal/middleware/requestid.go
@@ -65,9 +65,8 @@ func generateID() string {
 // AddRequestID returns a gin middleware which adds a unique ID to each request (both response header and context).
 func AddRequestID(header string) gin.HandlerFunc {
 	return func(c *gin.Context) {
-		// Look for existing ID.
 		id := c.GetHeader(header)
-
+		// Have we found an existing ID?
 		if id == "" {
 			// Generate new ID.
 			//
diff --git a/internal/tracing/no_trace.go b/internal/tracing/no_trace.go
new file mode 100644
index 000000000..12f46ce53
--- /dev/null
+++ b/internal/tracing/no_trace.go
@@ -0,0 +1,43 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+//go:build notracing
+
+package tracing
+
+import (
+	"errors"
+
+	"github.com/gin-gonic/gin"
+	"github.com/superseriousbusiness/gotosocial/internal/config"
+	"github.com/uptrace/bun"
+)
+
+func Initialize() error {
+	if config.GetTracingEnabled() {
+		return errors.New("tracing was disabled at build time")
+	}
+	return nil
+}
+
+func InstrumentGin() gin.HandlerFunc {
+	return func(c *gin.Context) {}
+}
+
+func InstrumentBun() bun.QueryHook {
+	return nil
+}
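
Because of the //go:build notracing constraint, this file only compiles when tracing support is excluded at build time, e.g.:

    go build -tags notracing ./cmd/gotosocial

Under such a build the no-op stubs above replace the real implementations, and Initialize() fails loudly if the config nevertheless asks for tracing, rather than silently dropping spans.
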
diff --git a/internal/tracing/tracing.go b/internal/tracing/tracing.go
new file mode 100644
index 000000000..a4b664fb2
--- /dev/null
+++ b/internal/tracing/tracing.go
@@ -0,0 +1,175 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+//go:build !notracing
+
+package tracing
+
+import (
+	"context"
+	"fmt"
+
+	"codeberg.org/gruf/go-kv"
+	"github.com/gin-gonic/gin"
+	"github.com/uptrace/bun"
+	"github.com/uptrace/bun/extra/bunotel"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/jaeger"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+	oteltrace "go.opentelemetry.io/otel/trace"
+
+	"github.com/superseriousbusiness/gotosocial/internal/config"
+	"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
+	"github.com/superseriousbusiness/gotosocial/internal/log"
+)
+
+const (
+	tracerKey  = "gotosocial-server-tracer"
+	tracerName = "github.com/superseriousbusiness/gotosocial/internal/tracing"
+)
+
+func Initialize() error {
+	if !config.GetTracingEnabled() {
+		return nil
+	}
+
+	insecure := config.GetTracingInsecureTransport()
+
+	var tpo trace.TracerProviderOption
+	switch config.GetTracingTransport() {
+	case "grpc":
+		opts := []otlptracegrpc.Option{
+			otlptracegrpc.WithEndpoint(config.GetTracingEndpoint()),
+		}
+		if insecure {
+			opts = append(opts, otlptracegrpc.WithInsecure())
+		}
+		exp, err := otlptracegrpc.New(context.Background(), opts...)
+		if err != nil {
+			return fmt.Errorf("building tracing exporter: %w", err)
+		}
+		tpo = trace.WithBatcher(exp)
+	case "jaeger":
+		exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(config.GetTracingEndpoint())))
+		if err != nil {
+			return fmt.Errorf("building tracing exporter: %w", err)
+		}
+		tpo = trace.WithBatcher(exp)
+	default:
+		return fmt.Errorf("invalid tracing transport: %s", config.GetTracingTransport())
+	}
+	r, _ := resource.Merge(
+		resource.Default(),
+		resource.NewWithAttributes(
+			semconv.SchemaURL,
+			semconv.ServiceName("GoToSocial"),
+		),
+	)
+
+	tp := trace.NewTracerProvider(
+		tpo,
+		trace.WithResource(r),
+	)
+	otel.SetTracerProvider(tp)
+	propagator := propagation.NewCompositeTextMapPropagator(
+		propagation.TraceContext{},
+		propagation.Baggage{},
+	)
+	otel.SetTextMapPropagator(propagator)
+	log.Hook(func(ctx context.Context, kvs []kv.Field) []kv.Field {
+		span := oteltrace.SpanFromContext(ctx)
+		if span != nil && span.SpanContext().HasTraceID() {
+			return append(kvs, kv.Field{K: "traceID", V: span.SpanContext().TraceID().String()})
+		}
+		return kvs
+	})
+	return nil
+}
+
+// InstrumentGin is a middleware injecting tracing information based on the
+// otelgin implementation found at
+// https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go
+func InstrumentGin() gin.HandlerFunc {
+	provider := otel.GetTracerProvider()
+	tracer := provider.Tracer(
+		tracerName,
+		oteltrace.WithInstrumentationVersion(config.GetSoftwareVersion()),
+	)
+	propagator := otel.GetTextMapPropagator()
+	return func(c *gin.Context) {
+		c.Set(tracerKey, tracer)
+		savedCtx := c.Request.Context()
+		defer func() {
+			c.Request = c.Request.WithContext(savedCtx)
+		}()
+		ctx := propagator.Extract(savedCtx, propagation.HeaderCarrier(c.Request.Header))
+		opts := []oteltrace.SpanStartOption{
+			oteltrace.WithAttributes(httpconv.ServerRequest(config.GetHost(), c.Request)...),
+			oteltrace.WithSpanKind(oteltrace.SpanKindServer),
+		}
+		spanName := c.FullPath()
+		if spanName == "" {
+			spanName = fmt.Sprintf("HTTP %s route not found", c.Request.Method)
+		} else {
+			rAttr := semconv.HTTPRoute(spanName)
+			opts = append(opts, oteltrace.WithAttributes(rAttr))
+		}
+		id := gtscontext.RequestID(c.Request.Context())
+		if id != "" {
+			opts = append(opts, oteltrace.WithAttributes(attribute.String("requestID", id)))
+		}
+		ctx, span := tracer.Start(ctx, spanName, opts...)
+		defer span.End()
+
+		// pass the span through the request context
+		c.Request = c.Request.WithContext(ctx)
+
+		// serve the request to the next middleware
+		c.Next()
+
+		status := c.Writer.Status()
+		span.SetStatus(httpconv.ServerStatus(status))
+		if status > 0 {
+			span.SetAttributes(semconv.HTTPStatusCode(status))
+		}
+		if len(c.Errors) > 0 {
+			span.SetAttributes(attribute.String("gin.errors", c.Errors.String()))
+		}
+	}
+}
+
+// InjectRequestID returns middleware that tags the active trace span
+// with the request ID from the request context, if one is set.
+func InjectRequestID() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		id := gtscontext.RequestID(c.Request.Context())
+		if id != "" {
+			span := oteltrace.SpanFromContext(c.Request.Context())
+			span.SetAttributes(attribute.String("requestID", id))
+		}
+	}
+}
+
+// InstrumentBun returns a bun query hook that creates a trace span for
+// each database query.
+func InstrumentBun() bun.QueryHook {
+	return bunotel.NewQueryHook(
+		bunotel.WithFormattedQueries(true),
+	)
+}
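
Taken together, `Initialize`, `InstrumentGin`, and `InstrumentBun` cover exporter setup, HTTP server spans, and database query spans. A minimal wiring sketch, not part of the patch itself; the `setupObservability` helper and the `router`/`db` parameter names are illustrative:

```go
// Sketch only: router/db construction is elided; names are illustrative.
package server

import (
	"github.com/gin-gonic/gin"
	"github.com/uptrace/bun"

	"github.com/superseriousbusiness/gotosocial/internal/tracing"
)

func setupObservability(router *gin.Engine, db *bun.DB) error {
	// Install the global tracer provider; a no-op when tracing is disabled.
	if err := tracing.Initialize(); err != nil {
		return err
	}

	// One server span per incoming HTTP request...
	router.Use(tracing.InstrumentGin())

	// ...and a span per database query.
	db.AddQueryHook(tracing.InstrumentBun())
	return nil
}
```
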
diff --git a/test/envparsing.sh b/test/envparsing.sh
index d1f7a9ee7..3580e8517 100755
--- a/test/envparsing.sh
+++ b/test/envparsing.sh
@@ -151,6 +151,10 @@ EXPECT=$(cat <<"EOF"
     "syslog-protocol": "udp",
     "tls-certificate-chain": "",
     "tls-certificate-key": "",
+    "tracing-enabled": false,
+    "tracing-endpoint": "localhost:4317",
+    "tracing-insecure": false,
+    "tracing-transport": "grpc",
     "trusted-proxies": [
         "127.0.0.1/32",
         "docker.host.local"
@@ -240,6 +244,7 @@ GTS_SMTP_DISCLOSE_RECIPIENTS=true \
 GTS_SYSLOG_ENABLED=true \
 GTS_SYSLOG_PROTOCOL='udp' \
 GTS_SYSLOG_ADDRESS='127.0.0.1:6969' \
+GTS_TRACING_ENDPOINT='localhost:4317' \
 GTS_ADVANCED_COOKIES_SAMESITE='strict' \
 GTS_ADVANCED_RATE_LIMIT_REQUESTS=6969 \
 GTS_ADVANCED_SENDER_MULTIPLIER=-1 \
diff --git a/testrig/config.go b/testrig/config.go
index 867341607..dea8ee641 100644
--- a/testrig/config.go
+++ b/testrig/config.go
@@ -113,6 +113,11 @@ var testDefaults = config.Configuration{
 	SMTPFrom:               "GoToSocial",
 	SMTPDiscloseRecipients: false,
 
+	TracingEnabled:           false,
+	TracingEndpoint:          "localhost:4317",
+	TracingTransport:         "grpc",
+	TracingInsecureTransport: true,
+
 	SyslogEnabled:  false,
 	SyslogProtocol: "udp",
 	SyslogAddress:  "localhost:514",
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 000000000..50d95c548
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 000000000..89b817996
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 000000000..16abdfc08
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,32 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retry intervals increase exponentially and stop increasing once a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[travis]: https://travis-ci.org/cenkalti/backoff
+[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
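
The Usage section above stops at the import path, so here is a minimal retry sketch; the HTTP fetch operation and URL are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Hypothetical operation: fetch a URL, returning an error on failure.
	operation := func() error {
		resp, err := http.Get("https://example.com/")
		if err != nil {
			return err // transient: retried with growing delays
		}
		return resp.Body.Close()
	}

	// Retries until success or the default MaxElapsedTime (15 minutes).
	if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
		fmt.Println("giving up:", err)
	}
}
```
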
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 000000000..3676ee405
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	// 	duration := backoff.NextBackOff();
+	// 	if (duration == backoff.Stop) {
+	// 		// Do not retry operation.
+	// 	} else {
+	// 		// Sleep for duration and retry operation.
+	// 	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop indicates that no more retries should be made; return it from NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+	Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset()                     {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+// NewConstantBackOff returns a ConstantBackOff with the given interval.
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+	return &ConstantBackOff{Interval: d}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 000000000..48482330e
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+	"context"
+	"time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+	BackOff
+	Context() context.Context
+}
+
+type backOffContext struct {
+	BackOff
+	ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx.
+//
+// ctx must not be nil.
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+	if ctx == nil {
+		panic("nil context")
+	}
+
+	if b, ok := b.(*backOffContext); ok {
+		return &backOffContext{
+			BackOff: b.BackOff,
+			ctx:     ctx,
+		}
+	}
+
+	return &backOffContext{
+		BackOff: b,
+		ctx:     ctx,
+	}
+}
+
+func getContext(b BackOff) context.Context {
+	if cb, ok := b.(BackOffContext); ok {
+		return cb.Context()
+	}
+	if tb, ok := b.(*backOffTries); ok {
+		return getContext(tb.delegate)
+	}
+	return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+	return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+	select {
+	case <-b.ctx.Done():
+		return Stop
+	default:
+		return b.BackOff.NextBackOff()
+	}
+}
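
A short sketch of `WithContext` in practice: `Retry` discovers the wrapped context via `getContext` and returns the context's error once it is done. The always-failing operation is illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// NextBackOff returns Stop as soon as ctx is done.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	err := backoff.Retry(func() error {
		return errors.New("transient failure") // never succeeds, for illustration
	}, b)

	fmt.Println(err) // context deadline exceeded
}
```
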
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 000000000..2c56c1e71
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,161 @@
+package backoff
+
+import (
+	"math/rand"
+	"time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+     RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the default arguments, and assuming we go over the
+MaxElapsedTime on the 10th try, the sequence for 10 tries will be:
+
+ Request #  RetryInterval (seconds)  Randomized Interval (seconds)
+
+  1          0.5                     [0.25,   0.75]
+  2          0.75                    [0.375,  1.125]
+  3          1.125                   [0.562,  1.687]
+  4          1.687                   [0.8435, 2.53]
+  5          2.53                    [1.265,  3.795]
+  6          3.795                   [1.897,  5.692]
+  7          5.692                   [2.846,  8.538]
+  8          8.538                   [4.269, 12.807]
+  9         12.807                   [6.403, 19.210]
+ 10         19.210                   backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+	InitialInterval     time.Duration
+	RandomizationFactor float64
+	Multiplier          float64
+	MaxInterval         time.Duration
+	// After MaxElapsedTime the ExponentialBackOff returns Stop.
+	// It never stops if MaxElapsedTime == 0.
+	MaxElapsedTime time.Duration
+	Stop           time.Duration
+	Clock          Clock
+
+	currentInterval time.Duration
+	startTime       time.Time
+}
+
+// Clock is an interface that returns current time for BackOff.
+type Clock interface {
+	Now() time.Time
+}
+
+// Default values for ExponentialBackOff.
+const (
+	DefaultInitialInterval     = 500 * time.Millisecond
+	DefaultRandomizationFactor = 0.5
+	DefaultMultiplier          = 1.5
+	DefaultMaxInterval         = 60 * time.Second
+	DefaultMaxElapsedTime      = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+	b := &ExponentialBackOff{
+		InitialInterval:     DefaultInitialInterval,
+		RandomizationFactor: DefaultRandomizationFactor,
+		Multiplier:          DefaultMultiplier,
+		MaxInterval:         DefaultMaxInterval,
+		MaxElapsedTime:      DefaultMaxElapsedTime,
+		Stop:                Stop,
+		Clock:               SystemClock,
+	}
+	b.Reset()
+	return b
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+	return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset resets the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+	b.currentInterval = b.InitialInterval
+	b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// 	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+	// Make sure we have not gone over the maximum elapsed time.
+	elapsed := b.GetElapsedTime()
+	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+	b.incrementCurrentInterval()
+	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+		return b.Stop
+	}
+	return next
+}
+
+// GetElapsedTime returns the elapsed time since the ExponentialBackOff
+// instance was created or last reset via Reset().
+//
+// The elapsed time is computed using the instance's Clock. It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow, if overflow is detected set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// Returns a random value from the following interval:
+// 	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	if randomizationFactor == 0 {
+		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+	}
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
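
A sketch of tuning `ExponentialBackOff` beyond its defaults. Because `NewExponentialBackOff` calls `Reset()` before these fields are changed, `Reset()` must be called again so `currentInterval` picks up the new `InitialInterval`; the chosen values are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 100 * time.Millisecond
	b.Multiplier = 2
	b.MaxInterval = 2 * time.Second
	b.MaxElapsedTime = 5 * time.Second
	b.Reset() // pick up the new InitialInterval and restart the elapsed clock

	// Print the randomized intervals until the policy gives up.
	for d := b.NextBackOff(); d != backoff.Stop; d = b.NextBackOff() {
		fmt.Println("next retry in", d)
		time.Sleep(d)
	}
}
```
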
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 000000000..b9c0c51cd
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+	"errors"
+	"time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+	return func() (struct{}, error) {
+		return struct{}{}, o()
+	}
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy says to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry retries the operation o until it does not return an error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+	return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+	return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+	return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+	return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+	_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+	return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+	return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+	var (
+		err  error
+		next time.Duration
+		res  T
+	)
+	if t == nil {
+		t = &defaultTimer{}
+	}
+
+	defer func() {
+		t.Stop()
+	}()
+
+	ctx := getContext(b)
+
+	b.Reset()
+	for {
+		res, err = operation()
+		if err == nil {
+			return res, nil
+		}
+
+		var permanent *PermanentError
+		if errors.As(err, &permanent) {
+			return res, permanent.Err
+		}
+
+		if next = b.NextBackOff(); next == Stop {
+			if cerr := ctx.Err(); cerr != nil {
+				return res, cerr
+			}
+
+			return res, err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		t.Start(next)
+
+		select {
+		case <-ctx.Done():
+			return res, ctx.Err()
+		case <-t.C():
+		}
+	}
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+	Err error
+}
+
+func (e *PermanentError) Error() string {
+	return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+	return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+	_, ok := target.(*PermanentError)
+	return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &PermanentError{
+		Err: err,
+	}
+}
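
A sketch of `Permanent` combined with `RetryNotify`: the permanent error short-circuits the loop and the wrapped error is returned, so the notify callback (which fires before each sleep on transient failures) never runs here. The not-found error is illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

var errNotFound = errors.New("not found")

func main() {
	op := func() error {
		// A failure that will never recover: wrap it so it is not retried.
		return backoff.Permanent(errNotFound)
	}

	// notify would run before each sleep on transient errors; a permanent
	// error returns immediately, so it is never invoked in this sketch.
	notify := func(err error, next time.Duration) {
		fmt.Printf("retrying in %s after: %v\n", next, err)
	}

	err := backoff.RetryNotify(op, backoff.NewExponentialBackOff(), notify)
	fmt.Println(errors.Is(err, errNotFound)) // true: the wrapped error is returned
}
```
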
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 000000000..df9d68bce
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+	"context"
+	"sync"
+	"time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+	C        <-chan time.Time
+	c        chan time.Time
+	b        BackOff
+	ctx      context.Context
+	timer    Timer
+	stop     chan struct{}
+	stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once.  The channel is closed when the
+// Stop method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+	return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+	if timer == nil {
+		timer = &defaultTimer{}
+	}
+	c := make(chan time.Time)
+	t := &Ticker{
+		C:     c,
+		c:     c,
+		b:     b,
+		ctx:   getContext(b),
+		timer: timer,
+		stop:  make(chan struct{}),
+	}
+	t.b.Reset()
+	go t.run()
+	return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+	t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+	c := t.c
+	defer close(c)
+
+	// Ticker is guaranteed to tick at least once.
+	afterC := t.send(time.Now())
+
+	for {
+		if afterC == nil {
+			return
+		}
+
+		select {
+		case tick := <-afterC:
+			afterC = t.send(tick)
+		case <-t.stop:
+			t.c = nil // Prevent future ticks from being sent to the channel.
+			return
+		case <-t.ctx.Done():
+			return
+		}
+	}
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+	select {
+	case t.c <- tick:
+	case <-t.stop:
+		return nil
+	}
+
+	next := t.b.NextBackOff()
+	if next == Stop {
+		t.Stop()
+		return nil
+	}
+
+	t.timer.Start(next)
+	return t.timer.C()
+}
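
A sketch of driving retries from the `Ticker` channel; the work function, which succeeds on its third attempt, is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	work := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready") // illustrative transient failure
		}
		return nil
	}

	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())

	var err error
	for range ticker.C { // ticks arrive on the backoff schedule
		if err = work(); err == nil {
			ticker.Stop() // success: stop ticking; the channel is closed
			break
		}
	}
	fmt.Println(attempts, err) // 3 <nil>
}
```
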
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 000000000..8120d0213
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+// Timer is the interface for the timer used to wait between retry attempts.
+type Timer interface {
+	Start(duration time.Duration)
+	Stop()
+	C() <-chan time.Time
+}
+
+// defaultTimer implements the Timer interface using time.Timer.
+type defaultTimer struct {
+	timer *time.Timer
+}
+
+// C returns the timer's channel, which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+	return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration.
+func (t *defaultTimer) Start(duration time.Duration) {
+	if t.timer == nil {
+		t.timer = time.NewTimer(duration)
+	} else {
+		t.timer.Reset(duration)
+	}
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 000000000..28d58ca37
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop if NextBackOff() has been called more than max times since
+the last time Reset() was called.
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+	return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+	delegate BackOff
+	maxTries uint64
+	numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+	if b.maxTries == 0 {
+		return Stop
+	}
+	if b.numTries >= b.maxTries {
+		return Stop
+	}
+	b.numTries++
+	return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+	b.numTries = 0
+	b.delegate.Reset()
+}
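
A sketch of `WithMaxRetries`. Note that `max` counts retries rather than total attempts, so a value of 4 permits one initial try plus four retries; the always-failing operation is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		return errors.New("still failing") // illustrative: never succeeds
	}

	// One initial attempt plus four retries, then the last error comes back.
	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 4)
	err := backoff.Retry(op, b)

	fmt.Println(attempts, err) // 5 still failing
}
```
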
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
new file mode 100644
index 000000000..94ff801df
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -0,0 +1,29 @@
+run:
+  timeout: 1m
+  tests: true
+
+linters:
+  disable-all: true
+  enable:
+    - asciicheck
+    - deadcode
+    - errcheck
+    - forcetypeassert
+    - gocritic
+    - gofmt
+    - goimports
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - structcheck
+    - typecheck
+    - unused
+    - varcheck
+
+issues:
+  exclude-use-default: false
+  max-issues-per-linter: 0
+  max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
new file mode 100644
index 000000000..c35696004
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CHANGELOG.md
@@ -0,0 +1,6 @@
+# CHANGELOG
+
+## v1.0.0-rc1
+
+This is the first logged release.  Major changes (including breaking changes)
+have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
new file mode 100644
index 000000000..5d37e294c
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+Logr is open to pull-requests, provided they fit within the intended scope of
+the project.  Specifically, this library aims to be VERY small and minimalist,
+with no external dependencies.
+
+## Compatibility
+
+This project intends to follow [semantic versioning](http://semver.org) and
+is very strict about compatibility.  Any proposed changes MUST follow those
+rules.
+
+## Performance
+
+As a logging library, logr must be as light-weight as possible.  Any proposed
+code change must include results of running the [benchmark](./benchmark)
+before and after the change.
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 000000000..ab5931181
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,282 @@
+# A minimal logging API for Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+
+logr offers an(other) opinion on how Go programs and libraries can do logging
+without becoming coupled to a particular logging implementation.  This is not
+an implementation of logging - it is an API.  In fact it is two APIs with two
+different sets of users.
+
+The `Logger` type is intended for application and library authors.  It provides
+a relatively small API which can be used everywhere you want to emit logs.  It
+defers the actual act of writing logs (to files, to stdout, or whatever) to the
+`LogSink` interface.
+
+The `LogSink` interface is intended for logging library implementers.  It is a
+pure interface which can be implemented by logging frameworks to provide the actual logging
+functionality.
+
+This decoupling allows application and library developers to write code in
+terms of `logr.Logger` (which has very low dependency fan-out) while the
+implementation of logging is managed "up stack" (e.g. in or near `main()`.)
+Application developers can then switch out implementations as necessary.
+
+Many people assert that libraries should not be logging, and as such efforts
+like this are pointless.  Those people are welcome to convince the authors of
+the tens-of-thousands of libraries that *DO* write logs that they are all
+wrong.  In the meantime, logr takes a more practical approach.
+
+## Typical usage
+
+Somewhere, early in an application's life, it will make a decision about which
+logging library (implementation) it actually wants to use.  Something like:
+
+```
+    func main() {
+        // ... other setup code ...
+
+        // Create the "root" logger.  We have chosen the "logimpl" implementation,
+        // which takes some initial parameters and returns a logr.Logger.
+        logger := logimpl.New(param1, param2)
+
+        // ... other setup code ...
+```
+
+Most apps will call into other libraries, create structures to govern the flow,
+etc.  The `logr.Logger` object can be passed to these other libraries, stored
+in structs, or even used as a package-global variable, if needed.  For example:
+
+```
+    app := createTheAppObject(logger)
+    app.Run()
+```
+
+Outside of this early setup, no other packages need to know about the choice of
+implementation.  They write logs in terms of the `logr.Logger` that they
+received:
+
+```
+    type appObject struct {
+        // ... other fields ...
+        logger logr.Logger
+        // ... other fields ...
+    }
+
+    func (app *appObject) Run() {
+        app.logger.Info("starting up", "timestamp", time.Now())
+
+        // ... app code ...
+```
+
+## Background
+
+If the Go standard library had defined an interface for logging, this project
+probably would not be needed.  Alas, here we are.
+
+### Inspiration
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense].  We really appreciate what
+he has to say, and it largely aligns with our own experiences.
+
+### Differences from Dave's ideas
+
+The main differences are:
+
+1. Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`.  We disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging.  This
+package restricts the logging API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors.  Error
+logs are, well, errors.  If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2. Verbosity-levels on info logs.  This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug."  Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+  key-value pairs, it's much easier to query your structured logs for
+  particular values by filtering on the contents of a particular key --
+  think searching request logs for error codes, Kubernetes reconcilers for
+  the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+  Similarly to searchability, if you maintain conventions around your
+  keys, it becomes easy to gather all log lines related to a particular
+  concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+  structure to your logs, you've got more precise control over how much
+  information is logged -- you might choose in a particular configuration
+  to log certain keys but not others, only log lines where a certain key
+  matches a certain value, etc., instead of just having v-levels and names
+  to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+  objects.)  Structured logs allow you to preserve that structure when
+  outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**.  V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message.  Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense].  Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+  regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+  a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations.  Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine.  Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+#### But I really want to use a format string!
+
+That's not actually a question.  Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+   and use that as a message.
+
+2. For every place you'd write a format specifier, look to the word before
+   it, and add that as a key value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+  responseCode, err)` becomes `logger.Error(err, "client returned an
+  error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+  seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+  response when requesting url", "attempt", retries, "after
+  seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself.  For instance: `log.Printf("unable to
+reflect over type %T", thing)` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T", thing))`.  In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs.)
+
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+  [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+  more complex ones. Kubernetes is one example of a project that has
+  [adopted that
+  convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier.  Your
+keys are, effectively, the schema of each log message.  If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use.  `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered.  Not all of
+these implementations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented.  All of the real work is behind the
+`LogSink` interface.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
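
As a concrete complement to the README, a minimal sketch of building a `logr.Logger` from the `funcr` package vendored below; the println-based sink and the chosen verbosity are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr.New turns any func(prefix, args string) into a LogSink.
	var logger logr.Logger = funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	logger = logger.WithName("app").WithValues("requestID", "abc123")
	logger.Info("starting up", "port", 8080)
	logger.V(2).Info("dropped: 2 is above the configured Verbosity of 1")
	logger.Error(fmt.Errorf("boom"), "shutting down")
}
```
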
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 000000000..9d92a38f1
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a Logger that discards all messages logged to it.  It can be
+// used whenever the caller is not interested in the logs.  Logger instances
+// produced by this function always compare as equal.
+func Discard() Logger {
+	return Logger{
+		level: 0,
+		sink:  discardLogSink{},
+	}
+}
+
+// discardLogSink is a LogSink that discards all messages.
+type discardLogSink struct{}
+
+// Verify that it actually implements the interface
+var _ LogSink = discardLogSink{}
+
+func (l discardLogSink) Init(RuntimeInfo) {
+}
+
+func (l discardLogSink) Enabled(int) bool {
+	return false
+}
+
+func (l discardLogSink) Info(int, string, ...interface{}) {
+}
+
+func (l discardLogSink) Error(error, string, ...interface{}) {
+}
+
+func (l discardLogSink) WithValues(...interface{}) LogSink {
+	return l
+}
+
+func (l discardLogSink) WithName(string) LogSink {
+	return l
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 000000000..7accdb0c4
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,787 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function.  See New and NewJSON for details.
+//
+// Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged.  When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+	return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+	fnWrapper := func(_, obj string) {
+		fn(obj)
+	}
+	return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+	GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+	l := &fnlogger{
+		Formatter: formatter,
+		write:     fn,
+	}
+	// For skipping fnlogger.Info and fnlogger.Error.
+	l.Formatter.AddCallDepth(1)
+	return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// LogCaller tells funcr to add a "caller" key to some or all log lines.
+	// This has some overhead, so some users might not want it.
+	LogCaller MessageClass
+
+	// LogCallerFunc tells funcr to also log the calling function name.  This
+	// has no effect if caller logging is not enabled (see Options.LogCaller).
+	LogCallerFunc bool
+
+	// LogTimestamp tells funcr to add a "ts" key to log lines.  This has some
+	// overhead, so some users might not want it.
+	LogTimestamp bool
+
+	// TimestampFormat tells funcr how to render timestamps when LogTimestamp
+	// is enabled.  If not specified, a default format will be used.  For more
+	// details, see docs for Go's time.Layout.
+	TimestampFormat string
+
+	// Verbosity tells funcr which V logs to produce.  Higher values enable
+	// more logs.  Info logs at or below this level will be written, while logs
+	// above this level will be discarded.
+	Verbosity int
+
+	// RenderBuiltinsHook allows users to mutate the list of key-value pairs
+	// while a log line is being rendered.  The kvList argument follows logr
+	// conventions - each pair of slice elements is comprised of a string key
+	// and an arbitrary value (verified and sanitized before calling this
+	// hook).  The value returned must follow the same conventions.  This hook
+	// can be used to audit or modify logged data.  For example, you might want
+	// to prefix all of funcr's built-in keys with some string.  This hook is
+	// only called for built-in (provided by funcr itself) key-value pairs.
+	// Equivalent hooks are offered for key-value pairs saved via
+	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+	// for user-provided pairs (see RenderArgsHook).
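+	//
+	// A sketch of such a hook, set inside funcr.Options (keys have already
+	// been sanitized to strings when the hook runs):
+	//
+	//   RenderBuiltinsHook: func(kvList []interface{}) []interface{} {
+	//       for i := 0; i < len(kvList); i += 2 {
+	//           kvList[i] = "funcr." + kvList[i].(string)
+	//       }
+	//       return kvList
+	//   },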
+	RenderBuiltinsHook func(kvList []interface{}) []interface{}
+
+	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+	// only called for key-value pairs saved via logr.Logger.WithValues.  See
+	// RenderBuiltinsHook for more details.
+	RenderValuesHook func(kvList []interface{}) []interface{}
+
+	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+	// called for key-value pairs passed directly to Info and Error.  See
+	// RenderBuiltinsHook for more details.
+	RenderArgsHook func(kvList []interface{}) []interface{}
+
+	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+	// that contains a struct, etc.) it may log.  Every time it finds a struct,
+	// slice, array, or map the depth is increased by one.  When the maximum is
+	// reached, the value will be converted to a string indicating that the max
+	// depth has been exceeded.  If this field is not specified, a default
+	// value will be used.
+	MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+	Formatter
+	write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+	return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+	return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+	return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+	if opts.TimestampFormat == "" {
+		opts.TimestampFormat = defaultTimestampFormat
+	}
+	if opts.MaxLogDepth == 0 {
+		opts.MaxLogDepth = defaultMaxLogDepth
+	}
+	f := Formatter{
+		outputFormat: outfmt,
+		prefix:       "",
+		values:       nil,
+		depth:        0,
+		opts:         opts,
+	}
+	return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+	outputFormat outputFormat
+	prefix       string
+	values       []interface{}
+	valuesStr    string
+	depth        int
+	opts         Options
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+	// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+	outputKeyValue outputFormat = iota
+	// outputJSON emits strict JSON.
+	outputJSON
+)
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
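+//
+// For example, PseudoStruct{"k1", 1, "k2", true} renders as
+// {"k1":1,"k2":true}.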
+type PseudoStruct []interface{}
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []interface{}) string {
+	// Empirically bytes.Buffer is faster than strings.Builder for this.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('{')
+	}
+	vals := builtins
+	if hook := f.opts.RenderBuiltinsHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+	continuing := len(builtins) > 0
+	if len(f.valuesStr) > 0 {
+		if continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(',')
+			} else {
+				buf.WriteByte(' ')
+			}
+		}
+		continuing = true
+		buf.WriteString(f.valuesStr)
+	}
+	vals = args
+	if hook := f.opts.RenderArgsHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+	f.flatten(buf, vals, continuing, true) // escape user-provided keys
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('}')
+	}
+	return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer.  If continuing is
+// true, it assumes that the buffer has previous values and will emit a
+// separator (which depends on the output format) before the first pair it
+// writes.  If escapeKeys is true, the keys are assumed to have
+// non-JSON-compatible characters in them and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+	// This logic overlaps with sanitize() but saves one type-cast per key,
+	// which can be measurable.
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			k = f.nonStringKey(kvList[i])
+			kvList[i] = k
+		}
+		v := kvList[i+1]
+
+		if i > 0 || continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(',')
+			} else {
+				// In theory the format could be something we don't understand.  In
+				// practice, we control it, so it won't be.
+				buf.WriteByte(' ')
+			}
+		}
+
+		if escapeKeys {
+			buf.WriteString(prettyString(k))
+		} else {
+			// this is faster
+			buf.WriteByte('"')
+			buf.WriteString(k)
+			buf.WriteByte('"')
+		}
+		if f.outputFormat == outputJSON {
+			buf.WriteByte(':')
+		} else {
+			buf.WriteByte('=')
+		}
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) pretty(value interface{}) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		return prettyString(v)
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(v, 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	case complex64:
+		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+	case complex128:
+		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+	case PseudoStruct:
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		v = f.sanitize(v)
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < len(v); i += 2 {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
+			// arbitrary keys might need escaping
+			buf.WriteString(prettyString(k))
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		return prettyString(v.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(v.Int(), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(v.Uint(), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Complex64:
+		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+	case reflect.Complex128:
+		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+	case reflect.Struct:
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < t.NumField(); i++ {
+			fld := t.Field(i)
+			if fld.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if !v.Field(i).CanInterface() {
+				// reflect isn't clear exactly what this means, but we can't use it.
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteByte('"')
+			buf.WriteString(name)
+			buf.WriteByte('"')
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys.  Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper.  The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
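+//
+// For example, a rendered caller value might look like this in JSON mode
+// (file and line are illustrative):
+//   "caller":{"file":"main.go","line":42}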
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+	f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+	return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter.  This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+	return f.depth
+}
+
+// FormatInfo renders an Info log message into strings.  The prefix will be
+// empty when no names were set (via AddName), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
+	args := make([]interface{}, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Info {
+		args = append(args, "caller", f.caller())
+	}
+	args = append(args, "level", level, "msg", msg)
+	return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings.  The prefix will be
+// empty when no names were set (via AddName), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
+	args := make([]interface{}, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Error {
+		args = append(args, "caller", f.caller())
+	}
+	args = append(args, "msg", msg)
+	var loggableErr interface{}
+	if err != nil {
+		loggableErr = err.Error()
+	}
+	args = append(args, "error", loggableErr)
+	return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name.  funcr uses '/' characters to separate
+// name elements.  Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+	if len(f.prefix) > 0 {
+		f.prefix += "/"
+	}
+	f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []interface{}) {
+	// Three slice args force a copy.
+	n := len(f.values)
+	f.values = append(f.values[:n:n], kvList...)
+
+	vals := f.values
+	if hook := f.opts.RenderValuesHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+
+	// Pre-render values, so we don't have to do it on each Info/Error call.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	f.flatten(buf, vals, false, true) // escape user-provided keys
+	f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 000000000..c3b56b3d2
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,510 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API.  Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// Usage
+//
+// Logging is done using a Logger instance.  Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface.  The main
+// methods of Logger are Info() and Error().  Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//   log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//   logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same.  Instead of:
+//   log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//   logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity.  If there is no error
+// instance available, passing nil is valid.
+//
+// Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode".  To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written.  Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info().  Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//   if flVerbose >= 2 {
+//       log.Printf("an unusual thing happened")
+//   }
+//
+// We can write:
+//   logger.V(2).Info("an unusual thing happened")
+//
+// Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context.  For example, you might want to add
+// a subsystem name:
+//
+//   logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use.  Repeated use of WithName()
+// will accumulate name "segments".  These name segments will be joined in some
+// way by the LogSink implementation.  It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance.  For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//   log.Printf("decided to set field foo to value %q for object %s/%s",
+//       targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//   // Elsewhere: set up the logger to log the object name.
+//   obj.logger = mainLogger.WithValues(
+//       "name", obj.name, "namespace", obj.namespace)
+//
+//   // later on...
+//   obj.logger.Info("setting foo", "value", targetValue)
+//
+// Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate.  There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string.  Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values.  Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// Calling methods with the null logger (Logger{}) as the instance will crash
+// because it has no LogSink. Therefore this null logger should never be passed
+// around. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
+// Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+//   * be human-readable and meaningful (not auto-generated or simple ordinals)
+//   * be constant (not dependent on input data)
+//   * contain only printable characters
+//   * not contain whitespace or punctuation
+//   * use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation.  For example, an implementation might emit JSON
+// data directly, or might store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+//   * "caller": the calling information (file/line) of a particular log line
+//   * "error": the underlying error value in the `Error` method
+//   * "level": the log level
+//   * "logger": the name of the associated logger
+//   * "msg": the log message
+//   * "stacktrace": the stack trace associated with a particular log line or
+//                   error (often from the `Error` message)
+//   * "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation.  The recommended pattern for this is:
+//   // Underlier exposes access to the underlying logging implementation.
+//   // Since callers only have a logr.Logger, they have to know which
+//   // implementation is in use, so this interface is less of an abstraction
+//   // and more of a way to test type conversion.
+//   type Underlier interface {
+//       GetUnderlying() <underlying-type>
+//   }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//   func DoSomethingWithImpl(log logr.Logger) {
+//       if underlier, ok := log.GetSink().(impl.Underlier); ok {
+//          implLogger := underlier.GetUnderlying()
+//          ...
+//       }
+//   }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//   // WithFooBar changes the foobar parameter in the log sink and returns a
+//   // new logger with that modified sink.  It does nothing for loggers where
+//   // the sink doesn't support that parameter.
+//   func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+//      if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+//         log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+//      }
+//      return log
+//   }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+import (
+	"context"
+)
+
+// New returns a new Logger instance.  This is primarily used by libraries
+// implementing LogSink, rather than end users.
+func New(sink LogSink) Logger {
+	logger := Logger{}
+	logger.setSink(sink)
+	sink.Init(runtimeInfo)
+	return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+	l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+	return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+	l.setSink(sink)
+	return l
+}
+
+// Logger is an interface to an abstract logging implementation.  This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink.  Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+	sink  LogSink
+	level int
+}
+
+// Enabled tests whether this Logger is enabled.  For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+	return l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line.  The key/value pairs can then be used to add additional variable
+// information.  The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+	if l.Enabled() {
+		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+			withHelper.GetCallStackHelper()()
+		}
+		l.sink.Info(l.level, msg, keysAndValues...)
+	}
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentations for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+		withHelper.GetCallStackHelper()()
+	}
+	l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger.  In other words, V-levels are additive.  A higher verbosity
+// level means a log message is less important.  Negative V-levels are treated
+// as 0.
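+//
+// For example, logger.V(1).V(1) is equivalent to logger.V(2).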
+func (l Logger) V(level int) Logger {
+	if level < 0 {
+		level = 0
+	}
+	l.level += level
+	return l
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+	l.setSink(l.sink.WithValues(keysAndValues...))
+	return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name.  Successive calls with WithName append additional
+// suffixes to the Logger's name.  It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+	l.setSink(l.sink.WithName(name))
+	return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods.  If depth is 0 the attribution
+// should be to the direct caller of this function.  If depth is 1 the
+// attribution should skip 1 call frame, and so on.  Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned.  If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+		l.setSink(withCallDepth.WithCallDepth(depth))
+	}
+	return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible.  This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will also be called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+	var helper func()
+	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+		l.setSink(withCallDepth.WithCallDepth(1))
+	}
+	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+		helper = withHelper.GetCallStackHelper()
+	} else {
+		helper = func() {}
+	}
+	return helper, l
+}
+
+// contextKey is how we find Loggers in a context.Context.
+type contextKey struct{}
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v, nil
+	}
+
+	return Logger{}, notFoundError{}
+}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+	return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+	return true
+}
+
+// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
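+//
+// For example, a sketch ("logger" is any previously constructed Logger):
+//   ctx := NewContext(context.Background(), logger)
+//   FromContextOrDiscard(ctx).Info("hello from downstream")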
+func NewContext(ctx context.Context, logger Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// RuntimeInfo holds information that the logr "core" library knows which
+// LogSinks might want to know.
+type RuntimeInfo struct {
+	// CallDepth is the number of call frames the logr library adds between the
+	// end-user and the LogSink.  LogSink implementations which choose to print
+	// the original logging site (e.g. file & line) should climb this many
+	// additional frames to find it.
+	CallDepth int
+}
+
+// runtimeInfo is a static global.  It must not be changed at run time.
+var runtimeInfo = RuntimeInfo{
+	CallDepth: 1,
+}
+
+// LogSink represents a logging implementation.  End-users will generally not
+// interact with this type.
+type LogSink interface {
+	// Init receives optional information about the logr library for LogSink
+	// implementations that need it.
+	Init(info RuntimeInfo)
+
+	// Enabled tests whether this LogSink is enabled at the specified V-level.
+	// For example, commandline flags might be used to set the logging
+	// verbosity and disable some info logs.
+	Enabled(level int) bool
+
+	// Info logs a non-error message with the given key/value pairs as context.
+	// The level argument is provided for optional logging.  This method will
+	// only be called when Enabled(level) is true. See Logger.Info for more
+	// details.
+	Info(level int, msg string, keysAndValues ...interface{})
+
+	// Error logs an error, with the given message and key/value pairs as
+	// context.  See Logger.Error for more details.
+	Error(err error, msg string, keysAndValues ...interface{})
+
+	// WithValues returns a new LogSink with additional key/value pairs.  See
+	// Logger.WithValues for more details.
+	WithValues(keysAndValues ...interface{}) LogSink
+
+	// WithName returns a new LogSink with the specified name appended.  See
+	// Logger.WithName for more details.
+	WithName(name string) LogSink
+}
+
+// CallDepthLogSink represents a Logger that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames.  This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+	// WithCallDepth returns a LogSink that will offset the call
+	// stack by the specified number of frames when logging call
+	// site information.
+	//
+	// If depth is 0, the LogSink should skip exactly the number
+	// of call frames defined in RuntimeInfo.CallDepth when Info
+	// or Error are called, i.e. the attribution should be to the
+	// direct caller of Logger.Info or Logger.Error.
+	//
+	// If depth is 1 the attribution should skip 1 call frame, and so on.
+	// Successive calls to this are additive.
+	WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a Logger that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+	// GetCallStackHelper returns a function that must be called
+	// to mark the direct caller as helper function when logging
+	// call site information.
+	GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+	// MarshalLog can be used to:
+	//   - ensure that structs are not logged as strings when the original
+	//     value has a String method: return a different type without a
+	//     String method
+	//   - select which fields of a complex type should get logged:
+	//     return a simpler struct with fewer fields
+	//   - log unexported fields: return a different struct
+	//     with exported fields
+	//
+	// It may return any value of any type.
+	MarshalLog() interface{}
+}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 000000000..515866789
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
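+
+A minimal usage sketch (assuming the standard `log` and `os` imports):
+
+```go
+logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
+logger.Info("hello", "key", "value")
+```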
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 000000000..93a8aab51
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level.  See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared.  If this is greater than or equal to the "V" of the logger, the
+// message will be logged.  A higher value here means more logs will be written.
+// The previous verbosity value is returned.  This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
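+//
+// For example:
+//   stdr.SetVerbosity(2) // logger.V(2).Info(...) will now be written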
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it.  If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it.  See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
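+
+// A minimal sketch of NewWithOptions (the option values are illustrative):
+//
+//	logger := stdr.NewWithOptions(
+//		log.New(os.Stderr, "", log.LstdFlags),
+//		stdr.Options{LogCaller: stdr.All},
+//	)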
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API).  Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation.  Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to test
+// type conversion.
+type Underlier interface {
+	GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger.  Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+	return l.std
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go
new file mode 100644
index 000000000..60e82caa9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONUnmarshalV2 = false
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func UnmarshalNext(d *json.Decoder, m proto.Message) error {
+	return new(Unmarshaler).UnmarshalNext(d, m)
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func Unmarshal(r io.Reader, m proto.Message) error {
+	return new(Unmarshaler).Unmarshal(r, m)
+}
+
+// UnmarshalString unmarshals a JSON object from s into m.
+func UnmarshalString(s string, m proto.Message) error {
+	return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+	// AllowUnknownFields specifies whether to allow messages to contain
+	// unknown JSON fields, as opposed to failing to unmarshal.
+	AllowUnknownFields bool
+
+	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
+	// If unset, the global registry is used by default.
+	AnyResolver AnyResolver
+}
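+
+// A minimal decoding sketch; msg stands in for any generated proto.Message:
+//
+//	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+//	err := u.Unmarshal(strings.NewReader(`{"name":"x","unknown":1}`), msg)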
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
+// they are unmarshaled from JSON. Messages that implement this should also
+// implement JSONPBMarshaler so that the custom format can be produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBUnmarshaler interface {
+	UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Unmarshal unmarshals a JSON object from r into m.
+func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
+	return u.UnmarshalNext(json.NewDecoder(r), m)
+}
+
+// UnmarshalNext unmarshals the next JSON object from d into m.
+func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
+	if m == nil {
+		return errors.New("invalid nil message")
+	}
+
+	// Parse the next JSON object from the stream.
+	raw := json.RawMessage{}
+	if err := d.Decode(&raw); err != nil {
+		return err
+	}
+
+	// Check for custom unmarshalers first since they may not properly
+	// implement protobuf reflection that the logic below relies on.
+	if jsu, ok := m.(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, raw)
+	}
+
+	mr := proto.MessageReflect(m)
+
+	// NOTE: For historical reasons, a top-level null is treated as a noop.
+	// This is incorrect, but kept for compatibility.
+	if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
+		return nil
+	}
+
+	if wrapJSONUnmarshalV2 {
+		// NOTE: If input message is non-empty, we need to preserve merge semantics
+		// of the old jsonpb implementation. These semantics are not supported by
+		// the protobuf JSON specification.
+		isEmpty := true
+		mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+			isEmpty = false // at least one iteration implies non-empty
+			return false
+		})
+		if !isEmpty {
+			// Perform unmarshaling into a newly allocated, empty message.
+			mr = mr.New()
+
+			// Use a defer to copy all unmarshaled fields into the original message.
+			dst := proto.MessageReflect(m)
+			defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+				dst.Set(fd, v)
+				return true
+			})
+		}
+
+		// Unmarshal using the v2 JSON unmarshaler.
+		opts := protojson.UnmarshalOptions{
+			DiscardUnknown: u.AllowUnknownFields,
+		}
+		if u.AnyResolver != nil {
+			opts.Resolver = anyResolver{u.AnyResolver}
+		}
+		return opts.Unmarshal(raw, mr.Interface())
+	} else {
+		if err := u.unmarshalMessage(mr, raw); err != nil {
+			return err
+		}
+		return protoV2.CheckInitialized(mr.Interface())
+	}
+}
+
+func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
+	md := m.Descriptor()
+	fds := md.Fields()
+
+	if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, in)
+	}
+
+	if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
+		return nil
+	}
+
+	switch wellKnownType(md.FullName()) {
+	case "Any":
+		var jsonObject map[string]json.RawMessage
+		if err := json.Unmarshal(in, &jsonObject); err != nil {
+			return err
+		}
+
+		rawTypeURL, ok := jsonObject["@type"]
+		if !ok {
+			return errors.New("Any JSON doesn't have '@type'")
+		}
+		typeURL, err := unquoteString(string(rawTypeURL))
+		if err != nil {
+			return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
+		}
+		m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
+
+		var m2 protoreflect.Message
+		if u.AnyResolver != nil {
+			mi, err := u.AnyResolver.Resolve(typeURL)
+			if err != nil {
+				return err
+			}
+			m2 = proto.MessageReflect(mi)
+		} else {
+			mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+			if err != nil {
+				if err == protoregistry.NotFound {
+					return fmt.Errorf("could not resolve Any message type: %v", typeURL)
+				}
+				return err
+			}
+			m2 = mt.New()
+		}
+
+		if wellKnownType(m2.Descriptor().FullName()) != "" {
+			rawValue, ok := jsonObject["value"]
+			if !ok {
+				return errors.New("Any JSON doesn't have 'value'")
+			}
+			if err := u.unmarshalMessage(m2, rawValue); err != nil {
+				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+			}
+		} else {
+			delete(jsonObject, "@type")
+			rawJSON, err := json.Marshal(jsonObject)
+			if err != nil {
+				return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+			}
+			if err = u.unmarshalMessage(m2, rawJSON); err != nil {
+				return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
+			}
+		}
+
+		rawWire, err := protoV2.Marshal(m2.Interface())
+		if err != nil {
+			return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
+		}
+		m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
+		return nil
+	case "BoolValue", "BytesValue", "StringValue",
+		"Int32Value", "UInt32Value", "FloatValue",
+		"Int64Value", "UInt64Value", "DoubleValue":
+		fd := fds.ByNumber(1)
+		v, err := u.unmarshalValue(m.NewField(fd), in, fd)
+		if err != nil {
+			return err
+		}
+		m.Set(fd, v)
+		return nil
+	case "Duration":
+		v, err := unquoteString(string(in))
+		if err != nil {
+			return err
+		}
+		d, err := time.ParseDuration(v)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+
+		sec := d.Nanoseconds() / 1e9
+		nsec := d.Nanoseconds() % 1e9
+		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+		return nil
+	case "Timestamp":
+		v, err := unquoteString(string(in))
+		if err != nil {
+			return err
+		}
+		t, err := time.Parse(time.RFC3339Nano, v)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+
+		sec := t.Unix()
+		nsec := t.Nanosecond()
+		m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
+		m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
+		return nil
+	case "Value":
+		switch {
+		case string(in) == "null":
+			m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
+		case string(in) == "true":
+			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
+		case string(in) == "false":
+			m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
+		case hasPrefixAndSuffix('"', in, '"'):
+			s, err := unquoteString(string(in))
+			if err != nil {
+				return fmt.Errorf("unrecognized type for Value %q", in)
+			}
+			m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
+		case hasPrefixAndSuffix('[', in, ']'):
+			v := m.Mutable(fds.ByNumber(6))
+			return u.unmarshalMessage(v.Message(), in)
+		case hasPrefixAndSuffix('{', in, '}'):
+			v := m.Mutable(fds.ByNumber(5))
+			return u.unmarshalMessage(v.Message(), in)
+		default:
+			f, err := strconv.ParseFloat(string(in), 0)
+			if err != nil {
+				return fmt.Errorf("unrecognized type for Value %q", in)
+			}
+			m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
+		}
+		return nil
+	case "ListValue":
+		var jsonArray []json.RawMessage
+		if err := json.Unmarshal(in, &jsonArray); err != nil {
+			return fmt.Errorf("bad ListValue: %v", err)
+		}
+
+		lv := m.Mutable(fds.ByNumber(1)).List()
+		for _, raw := range jsonArray {
+			ve := lv.NewElement()
+			if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
+				return err
+			}
+			lv.Append(ve)
+		}
+		return nil
+	case "Struct":
+		var jsonObject map[string]json.RawMessage
+		if err := json.Unmarshal(in, &jsonObject); err != nil {
+			return fmt.Errorf("bad StructValue: %v", err)
+		}
+
+		mv := m.Mutable(fds.ByNumber(1)).Map()
+		for key, raw := range jsonObject {
+			kv := protoreflect.ValueOf(key).MapKey()
+			vv := mv.NewValue()
+			if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
+				return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
+			}
+			mv.Set(kv, vv)
+		}
+		return nil
+	}
+
+	var jsonObject map[string]json.RawMessage
+	if err := json.Unmarshal(in, &jsonObject); err != nil {
+		return err
+	}
+
+	// Handle known fields.
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if fd.IsWeak() && fd.Message().IsPlaceholder() {
+			continue //  weak reference is not linked in
+		}
+
+		// Search for any raw JSON value associated with this field.
+		var raw json.RawMessage
+		name := string(fd.Name())
+		if fd.Kind() == protoreflect.GroupKind {
+			name = string(fd.Message().Name())
+		}
+		if v, ok := jsonObject[name]; ok {
+			delete(jsonObject, name)
+			raw = v
+		}
+		name = string(fd.JSONName())
+		if v, ok := jsonObject[name]; ok {
+			delete(jsonObject, name)
+			raw = v
+		}
+
+		field := m.NewField(fd)
+		// Unmarshal the field value.
+		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+			continue
+		}
+		v, err := u.unmarshalValue(field, raw, fd)
+		if err != nil {
+			return err
+		}
+		m.Set(fd, v)
+	}
+
+	// Handle extension fields.
+	for name, raw := range jsonObject {
+		if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
+			continue
+		}
+
+		// Resolve the extension field by name.
+		xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
+		xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+		if xt == nil && isMessageSet(md) {
+			xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+		}
+		if xt == nil {
+			continue
+		}
+		delete(jsonObject, name)
+		fd := xt.TypeDescriptor()
+		if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+			return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
+		}
+
+		field := m.NewField(fd)
+		// Unmarshal the field value.
+		if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
+			continue
+		}
+		v, err := u.unmarshalValue(field, raw, fd)
+		if err != nil {
+			return err
+		}
+		m.Set(fd, v)
+	}
+
+	if !u.AllowUnknownFields && len(jsonObject) > 0 {
+		for name := range jsonObject {
+			return fmt.Errorf("unknown field %q in %v", name, md.FullName())
+		}
+	}
+	return nil
+}
+
+func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
+	if md := fd.Message(); md != nil {
+		return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
+	}
+	return false
+}
+
+func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
+	if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
+		_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
+		return ok
+	}
+	return false
+}
+
+func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	switch {
+	case fd.IsList():
+		var jsonArray []json.RawMessage
+		if err := json.Unmarshal(in, &jsonArray); err != nil {
+			return v, err
+		}
+		lv := v.List()
+		for _, raw := range jsonArray {
+			ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
+			if err != nil {
+				return v, err
+			}
+			lv.Append(ve)
+		}
+		return v, nil
+	case fd.IsMap():
+		var jsonObject map[string]json.RawMessage
+		if err := json.Unmarshal(in, &jsonObject); err != nil {
+			return v, err
+		}
+		kfd := fd.MapKey()
+		vfd := fd.MapValue()
+		mv := v.Map()
+		for key, raw := range jsonObject {
+			var kv protoreflect.MapKey
+			if kfd.Kind() == protoreflect.StringKind {
+				kv = protoreflect.ValueOf(key).MapKey()
+			} else {
+				v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
+				if err != nil {
+					return v, err
+				}
+				kv = v.MapKey()
+			}
+
+			vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
+			if err != nil {
+				return v, err
+			}
+			mv.Set(kv, vv)
+		}
+		return v, nil
+	default:
+		return u.unmarshalSingularValue(v, in, fd)
+	}
+}
+
+var nonFinite = map[string]float64{
+	`"NaN"`:       math.NaN(),
+	`"Infinity"`:  math.Inf(+1),
+	`"-Infinity"`: math.Inf(-1),
+}
+
+func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		return unmarshalValue(in, new(bool))
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		return unmarshalValue(trimQuote(in), new(int32))
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		return unmarshalValue(trimQuote(in), new(int64))
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		return unmarshalValue(trimQuote(in), new(uint32))
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		return unmarshalValue(trimQuote(in), new(uint64))
+	case protoreflect.FloatKind:
+		if f, ok := nonFinite[string(in)]; ok {
+			return protoreflect.ValueOfFloat32(float32(f)), nil
+		}
+		return unmarshalValue(trimQuote(in), new(float32))
+	case protoreflect.DoubleKind:
+		if f, ok := nonFinite[string(in)]; ok {
+			return protoreflect.ValueOfFloat64(float64(f)), nil
+		}
+		return unmarshalValue(trimQuote(in), new(float64))
+	case protoreflect.StringKind:
+		return unmarshalValue(in, new(string))
+	case protoreflect.BytesKind:
+		return unmarshalValue(in, new([]byte))
+	case protoreflect.EnumKind:
+		if hasPrefixAndSuffix('"', in, '"') {
+			vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
+			if vd == nil {
+				return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
+			}
+			return protoreflect.ValueOfEnum(vd.Number()), nil
+		}
+		return unmarshalValue(in, new(protoreflect.EnumNumber))
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		err := u.unmarshalMessage(v.Message(), in)
+		return v, err
+	default:
+		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+	}
+}
+
+func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
+	err := json.Unmarshal(in, v)
+	return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
+}
+
+func unquoteString(in string) (out string, err error) {
+	err = json.Unmarshal([]byte(in), &out)
+	return out, err
+}
+
+func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
+	if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
+		return true
+	}
+	return false
+}
+
+// trimQuote is like unquoteString but simply strips surrounding quotes.
+// This is incorrect, but matches the behavior of the legacy implementation.
+func trimQuote(in []byte) []byte {
+	if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
+		in = in[1 : len(in)-1]
+	}
+	return in
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go
new file mode 100644
index 000000000..685c80a62
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go
@@ -0,0 +1,559 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonpb
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapJSONMarshalV2 = false
+
+// Marshaler is a configurable object for marshaling protocol buffer messages
+// to the specified JSON representation.
+type Marshaler struct {
+	// OrigName specifies whether to use the original protobuf name for fields.
+	OrigName bool
+
+	// EnumsAsInts specifies whether to render enum values as integers,
+	// as opposed to string values.
+	EnumsAsInts bool
+
+	// EmitDefaults specifies whether to render fields with zero values.
+	EmitDefaults bool
+
+	// Indent controls whether the output is compact or not.
+	// If empty, the output is compact JSON. Otherwise, every JSON object
+	// entry and JSON array value will be on its own line.
+	// Each line will be preceded by repeated copies of Indent, where the
+	// number of copies is the current indentation depth.
+	Indent string
+
+	// AnyResolver is used to resolve the google.protobuf.Any well-known type.
+	// If unset, the global registry is used by default.
+	AnyResolver AnyResolver
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should also
+// implement JSONPBUnmarshaler so that the custom format can be parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// Deprecated: Custom types should implement protobuf reflection instead.
+type JSONPBMarshaler interface {
+	MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// Marshal serializes a protobuf message as JSON into w.
+func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
+	b, err := jm.marshal(m)
+	if len(b) > 0 {
+		if _, err := w.Write(b); err != nil {
+			return err
+		}
+	}
+	return err
+}
+
+// MarshalToString serializes a protobuf message as JSON in string form.
+func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
+	b, err := jm.marshal(m)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
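+
+// A minimal encoding sketch; msg stands in for any generated proto.Message:
+//
+//	m := &jsonpb.Marshaler{Indent: "  ", EmitDefaults: true}
+//	s, err := m.MarshalToString(msg)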
+
+func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
+	v := reflect.ValueOf(m)
+	if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return nil, errors.New("Marshal called with nil")
+	}
+
+	// Check for custom marshalers first since they may not properly
+	// implement protobuf reflection that the logic below relies on.
+	if jsm, ok := m.(JSONPBMarshaler); ok {
+		return jsm.MarshalJSONPB(jm)
+	}
+
+	if wrapJSONMarshalV2 {
+		opts := protojson.MarshalOptions{
+			UseProtoNames:   jm.OrigName,
+			UseEnumNumbers:  jm.EnumsAsInts,
+			EmitUnpopulated: jm.EmitDefaults,
+			Indent:          jm.Indent,
+		}
+		if jm.AnyResolver != nil {
+			opts.Resolver = anyResolver{jm.AnyResolver}
+		}
+		return opts.Marshal(proto.MessageReflect(m).Interface())
+	} else {
+		// Check for unpopulated required fields first.
+		m2 := proto.MessageReflect(m)
+		if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
+			return nil, err
+		}
+
+		w := jsonWriter{Marshaler: jm}
+		err := w.marshalMessage(m2, "", "")
+		return w.buf, err
+	}
+}
+
+type jsonWriter struct {
+	*Marshaler
+	buf []byte
+}
+
+func (w *jsonWriter) write(s string) {
+	w.buf = append(w.buf, s...)
+}
+
+func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
+	if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
+		b, err := jsm.MarshalJSONPB(w.Marshaler)
+		if err != nil {
+			return err
+		}
+		if typeURL != "" {
+			// we are marshaling this object to an Any type
+			var js map[string]*json.RawMessage
+			if err = json.Unmarshal(b, &js); err != nil {
+				return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
+			}
+			turl, err := json.Marshal(typeURL)
+			if err != nil {
+				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+			}
+			js["@type"] = (*json.RawMessage)(&turl)
+			if b, err = json.Marshal(js); err != nil {
+				return err
+			}
+		}
+		w.write(string(b))
+		return nil
+	}
+
+	md := m.Descriptor()
+	fds := md.Fields()
+
+	// Handle well-known types.
+	const secondInNanos = int64(time.Second / time.Nanosecond)
+	switch wellKnownType(md.FullName()) {
+	case "Any":
+		return w.marshalAny(m, indent)
+	case "BoolValue", "BytesValue", "StringValue",
+		"Int32Value", "UInt32Value", "FloatValue",
+		"Int64Value", "UInt64Value", "DoubleValue":
+		fd := fds.ByNumber(1)
+		return w.marshalValue(fd, m.Get(fd), indent)
+	case "Duration":
+		const maxSecondsInDuration = 315576000000
+		// "Generated output always contains 0, 3, 6, or 9 fractional digits,
+		//  depending on required precision."
+		s := m.Get(fds.ByNumber(1)).Int()
+		ns := m.Get(fds.ByNumber(2)).Int()
+		if s < -maxSecondsInDuration || s > maxSecondsInDuration {
+			return fmt.Errorf("seconds out of range %v", s)
+		}
+		if ns <= -secondInNanos || ns >= secondInNanos {
+			return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+		}
+		if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+			return errors.New("signs of seconds and nanos do not match")
+		}
+		var sign string
+		if s < 0 || ns < 0 {
+			sign, s, ns = "-", -1*s, -1*ns
+		}
+		x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, ".000")
+		w.write(fmt.Sprintf(`"%vs"`, x))
+		return nil
+	case "Timestamp":
+		// "RFC 3339, where generated output will always be Z-normalized
+		//  and uses 0, 3, 6 or 9 fractional digits."
+		s := m.Get(fds.ByNumber(1)).Int()
+		ns := m.Get(fds.ByNumber(2)).Int()
+		if ns < 0 || ns >= secondInNanos {
+			return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+		}
+		t := time.Unix(s, ns).UTC()
+		// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+		x := t.Format("2006-01-02T15:04:05.000000000")
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, "000")
+		x = strings.TrimSuffix(x, ".000")
+		w.write(fmt.Sprintf(`"%vZ"`, x))
+		return nil
+	case "Value":
+		// JSON value, which is a null, number, string, bool, object, or array.
+		od := md.Oneofs().Get(0)
+		fd := m.WhichOneof(od)
+		if fd == nil {
+			return errors.New("nil Value")
+		}
+		return w.marshalValue(fd, m.Get(fd), indent)
+	case "Struct", "ListValue":
+		// JSON object or array.
+		fd := fds.ByNumber(1)
+		return w.marshalValue(fd, m.Get(fd), indent)
+	}
+
+	w.write("{")
+	if w.Indent != "" {
+		w.write("\n")
+	}
+
+	firstField := true
+	if typeURL != "" {
+		if err := w.marshalTypeURL(indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil {
+			fd = m.WhichOneof(od)
+			i += od.Fields().Len()
+			if fd == nil {
+				continue
+			}
+		} else {
+			i++
+		}
+
+		v := m.Get(fd)
+
+		if !m.Has(fd) {
+			if !w.EmitDefaults || fd.ContainingOneof() != nil {
+				continue
+			}
+			if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
+				v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
+			}
+		}
+
+		if !firstField {
+			w.writeComma()
+		}
+		if err := w.marshalField(fd, v, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if md.ExtensionRanges().Len() > 0 {
+		// Collect a sorted list of all extension descriptor and values.
+		type ext struct {
+			desc protoreflect.FieldDescriptor
+			val  protoreflect.Value
+		}
+		var exts []ext
+		m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+			if fd.IsExtension() {
+				exts = append(exts, ext{fd, v})
+			}
+			return true
+		})
+		sort.Slice(exts, func(i, j int) bool {
+			return exts[i].desc.Number() < exts[j].desc.Number()
+		})
+
+		for _, ext := range exts {
+			if !firstField {
+				w.writeComma()
+			}
+			if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+	}
+
+	if w.Indent != "" {
+		w.write("\n")
+		w.write(indent)
+	}
+	w.write("}")
+	return nil
+}
+
+func (w *jsonWriter) writeComma() {
+	if w.Indent != "" {
+		w.write(",\n")
+	} else {
+		w.write(",")
+	}
+}
+
+func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
+	//  Otherwise, the value will be converted into a JSON object,
+	//  and the "@type" field will be inserted to indicate the actual data type."
+	md := m.Descriptor()
+	typeURL := m.Get(md.Fields().ByNumber(1)).String()
+	rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
+
+	var m2 protoreflect.Message
+	if w.AnyResolver != nil {
+		mi, err := w.AnyResolver.Resolve(typeURL)
+		if err != nil {
+			return err
+		}
+		m2 = proto.MessageReflect(mi)
+	} else {
+		mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
+		if err != nil {
+			return err
+		}
+		m2 = mt.New()
+	}
+
+	if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
+		return err
+	}
+
+	if wellKnownType(m2.Descriptor().FullName()) == "" {
+		return w.marshalMessage(m2, indent, typeURL)
+	}
+
+	w.write("{")
+	if w.Indent != "" {
+		w.write("\n")
+	}
+	if err := w.marshalTypeURL(indent, typeURL); err != nil {
+		return err
+	}
+	w.writeComma()
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+		w.write(`"value": `)
+	} else {
+		w.write(`"value":`)
+	}
+	if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
+		return err
+	}
+	if w.Indent != "" {
+		w.write("\n")
+		w.write(indent)
+	}
+	w.write("}")
+	return nil
+}
+
+func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+	}
+	w.write(`"@type":`)
+	if w.Indent != "" {
+		w.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	w.write(string(b))
+	return nil
+}
+
+// marshalField writes field description and value to the Writer.
+func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	if w.Indent != "" {
+		w.write(indent)
+		w.write(w.Indent)
+	}
+	w.write(`"`)
+	switch {
+	case fd.IsExtension():
+		// For message set, use the full name of the message as the extension name.
+		name := string(fd.FullName())
+		if isMessageSet(fd.ContainingMessage()) {
+			name = strings.TrimSuffix(name, ".message_set_extension")
+		}
+
+		w.write("[" + name + "]")
+	case w.OrigName:
+		name := string(fd.Name())
+		if fd.Kind() == protoreflect.GroupKind {
+			name = string(fd.Message().Name())
+		}
+		w.write(name)
+	default:
+		w.write(string(fd.JSONName()))
+	}
+	w.write(`":`)
+	if w.Indent != "" {
+		w.write(" ")
+	}
+	return w.marshalValue(fd, v, indent)
+}
+
+func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	switch {
+	case fd.IsList():
+		w.write("[")
+		comma := ""
+		lv := v.List()
+		for i := 0; i < lv.Len(); i++ {
+			w.write(comma)
+			if w.Indent != "" {
+				w.write("\n")
+				w.write(indent)
+				w.write(w.Indent)
+				w.write(w.Indent)
+			}
+			if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if w.Indent != "" {
+			w.write("\n")
+			w.write(indent)
+			w.write(w.Indent)
+		}
+		w.write("]")
+		return nil
+	case fd.IsMap():
+		kfd := fd.MapKey()
+		vfd := fd.MapValue()
+		mv := v.Map()
+
+		// Collect a sorted list of all map keys and values.
+		type entry struct{ key, val protoreflect.Value }
+		var entries []entry
+		mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+			entries = append(entries, entry{k.Value(), v})
+			return true
+		})
+		sort.Slice(entries, func(i, j int) bool {
+			switch kfd.Kind() {
+			case protoreflect.BoolKind:
+				return !entries[i].key.Bool() && entries[j].key.Bool()
+			case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+				return entries[i].key.Int() < entries[j].key.Int()
+			case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+				return entries[i].key.Uint() < entries[j].key.Uint()
+			case protoreflect.StringKind:
+				return entries[i].key.String() < entries[j].key.String()
+			default:
+				panic("invalid kind")
+			}
+		})
+
+		w.write(`{`)
+		comma := ""
+		for _, entry := range entries {
+			w.write(comma)
+			if w.Indent != "" {
+				w.write("\n")
+				w.write(indent)
+				w.write(w.Indent)
+				w.write(w.Indent)
+			}
+
+			s := fmt.Sprint(entry.key.Interface())
+			b, err := json.Marshal(s)
+			if err != nil {
+				return err
+			}
+			w.write(string(b))
+
+			w.write(`:`)
+			if w.Indent != "" {
+				w.write(` `)
+			}
+
+			if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if w.Indent != "" {
+			w.write("\n")
+			w.write(indent)
+			w.write(w.Indent)
+		}
+		w.write(`}`)
+		return nil
+	default:
+		return w.marshalSingularValue(fd, v, indent)
+	}
+}
+
+func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
+	switch {
+	case !v.IsValid():
+		w.write("null")
+		return nil
+	case fd.Message() != nil:
+		return w.marshalMessage(v.Message(), indent+w.Indent, "")
+	case fd.Enum() != nil:
+		if fd.Enum().FullName() == "google.protobuf.NullValue" {
+			w.write("null")
+			return nil
+		}
+
+		vd := fd.Enum().Values().ByNumber(v.Enum())
+		if vd == nil || w.EnumsAsInts {
+			w.write(strconv.Itoa(int(v.Enum())))
+		} else {
+			w.write(`"` + string(vd.Name()) + `"`)
+		}
+		return nil
+	default:
+		switch v.Interface().(type) {
+		case float32, float64:
+			switch {
+			case math.IsInf(v.Float(), +1):
+				w.write(`"Infinity"`)
+				return nil
+			case math.IsInf(v.Float(), -1):
+				w.write(`"-Infinity"`)
+				return nil
+			case math.IsNaN(v.Float()):
+				w.write(`"NaN"`)
+				return nil
+			}
+		case int64, uint64:
+			w.write(fmt.Sprintf(`"%d"`, v.Interface()))
+			return nil
+		}
+
+		b, err := json.Marshal(v.Interface())
+		if err != nil {
+			return err
+		}
+		w.write(string(b))
+		return nil
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go
new file mode 100644
index 000000000..480e2448d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/json.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jsonpb provides functionality to marshal and unmarshal between a
+// protocol buffer message and JSON. It follows the specification at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// Do not rely on the default behavior of the standard encoding/json package
+// when called on generated message types as it does not operate correctly.
+//
+// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
+// package instead.
+package jsonpb
+
+import (
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// AnyResolver takes a type URL, present in an Any message,
+// and resolves it into an instance of the associated message.
+type AnyResolver interface {
+	Resolve(typeURL string) (proto.Message, error)
+}
+
+type anyResolver struct{ AnyResolver }
+
+func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+	return r.FindMessageByURL(string(message))
+}
+
+func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	m, err := r.Resolve(url)
+	if err != nil {
+		return nil, err
+	}
+	return protoimpl.X.MessageTypeOf(m), nil
+}
+
+func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+func wellKnownType(s protoreflect.FullName) string {
+	if s.Parent() == "google.protobuf" {
+		switch s.Name() {
+		case "Empty", "Any",
+			"BoolValue", "BytesValue", "StringValue",
+			"Int32Value", "UInt32Value", "FloatValue",
+			"Int64Value", "UInt64Value", "DoubleValue",
+			"Duration", "Timestamp",
+			"NullValue", "Struct", "Value", "ListValue":
+			return string(s.Name())
+		}
+	}
+	return ""
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+	ms, ok := md.(interface{ IsMessageSet() bool })
+	return ok && ms.IsMessageSet()
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 000000000..85f9f5736
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+const urlPrefix = "type.googleapis.com/"
+
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+//
+// Deprecated: Call the any.MessageName method instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+	name, err := anyMessageName(any)
+	return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	name := protoreflect.FullName(any.TypeUrl)
+	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+		name = name[i+len("/"):]
+	}
+	if !name.IsValid() {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return name, nil
+}
+
+// MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+	switch dm := m.(type) {
+	case DynamicAny:
+		m = dm.Message
+	case *DynamicAny:
+		if dm == nil {
+			return nil, proto.ErrNil
+		}
+		m = dm.Message
+	}
+	b, err := proto.Marshal(m)
+	if err != nil {
+		return nil, err
+	}
+	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
+}
+
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
+func Empty(any *anypb.Any) (proto.Message, error) {
+	name, err := anyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+	if err != nil {
+		return nil, err
+	}
+	return proto.MessageV1(mt.New().Interface()), nil
+}
+
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
+//
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+	if dm, ok := m.(*DynamicAny); ok {
+		if dm.Message == nil {
+			var err error
+			dm.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		m = dm.Message
+	}
+
+	anyName, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+	msgName := proto.MessageName(m)
+	if anyName != msgName {
+		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
+	}
+	return proto.Unmarshal(any.Value, m)
+}
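+
+// A minimal round-trip sketch; ts stands in for any concrete message value
+// (for example a *timestamppb.Timestamp):
+//
+//	a, err := ptypes.MarshalAny(ts)
+//	// ...
+//	out := &timestamppb.Timestamp{}
+//	err = ptypes.UnmarshalAny(a, out)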
+
+// Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
+func Is(any *anypb.Any, m proto.Message) bool {
+	if any == nil || m == nil {
+		return false
+	}
+	name := proto.MessageName(m)
+	if !strings.HasSuffix(any.TypeUrl, name) {
+		return false
+	}
+	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+	if m.Message == nil {
+		return "<nil>"
+	}
+	return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+	if m.Message == nil {
+		return
+	}
+	m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+	return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+	if m.Message == nil {
+		return nil
+	}
+	return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+	return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+	return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+	return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+	return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+	return dynamicAny{t.MessageType.Zero()}
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 000000000..0ef27d33d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/any/any.proto
+
+package any
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/any.proto.
+
+type Any = anypb.Any
+
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+	0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+	0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+	file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 000000000..d3c33259d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 000000000..b2b55dd85
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,76 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durationpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
+const (
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+	if err := validateDuration(dur); err != nil {
+		return 0, err
+	}
+	d := time.Duration(dur.Seconds) * time.Second
+	if int64(d/time.Second) != dur.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+	}
+	if dur.Nanos != 0 {
+		d += time.Duration(dur.Nanos) * time.Nanosecond
+		if (d < 0) != (dur.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
+func DurationProto(d time.Duration) *durationpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durationpb.Duration{
+		Seconds: int64(secs),
+		Nanos:   int32(nanos),
+	}
+}
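+
+// A minimal round-trip sketch:
+//
+//	d, err := ptypes.Duration(ptypes.DurationProto(90 * time.Second))
+//	// d == 90*time.Second, err == nil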
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a time.Duration.
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+	if dur == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", dur)
+	}
+	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", dur)
+	}
+	// Seconds and Nanos must have the same sign, unless dur.Nanos is zero.
+	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 000000000..d0079ee3e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,63 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
+
+package duration
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/duration.proto.
+
+type Duration = durationpb.Duration
+
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 000000000..8368a3f70
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
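+
+// A minimal round-trip sketch:
+//
+//	ts, err := ptypes.TimestampProto(time.Now())
+//	// ...
+//	t, err := ptypes.Timestamp(ts) // the same instant, in UTC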
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 000000000..a76f80760
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+
+package timestamp
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/timestamp.proto.
+
+type Timestamp = timestamppb.Timestamp
+
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt
new file mode 100644
index 000000000..364516251
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
new file mode 100644
index 000000000..f694f3c0d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "httprule",
+    srcs = [
+        "compile.go",
+        "parse.go",
+        "types.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
+    deps = ["//utilities"],
+)
+
+go_test(
+    name = "httprule_test",
+    size = "small",
+    srcs = [
+        "compile_test.go",
+        "parse_test.go",
+        "types_test.go",
+    ],
+    embed = [":httprule"],
+    deps = [
+        "//utilities",
+        "@com_github_golang_glog//:glog",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":httprule",
+    visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
new file mode 100644
index 000000000..3cd937295
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
@@ -0,0 +1,121 @@
+package httprule
+
+import (
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+)
+
+const (
+	opcodeVersion = 1
+)
+
+// Template is a compiled representation of path templates.
+type Template struct {
+	// Version is the version number of the format.
+	Version int
+	// OpCodes is a sequence of operations.
+	OpCodes []int
+	// Pool is a constant pool.
+	Pool []string
+	// Verb is a VERB part in the template.
+	Verb string
+	// Fields is a list of field paths bound in this template.
+	Fields []string
+	// Original template (example: /v1/a_bit_of_everything)
+	Template string
+}
+
+// Compiler compiles a parsed representation of path templates into marshallable operations.
+// They can be unmarshalled by runtime.NewPattern.
+type Compiler interface {
+	Compile() Template
+}
+
+type op struct {
+	// code is the opcode of the operation
+	code utilities.OpCode
+
+	// str is a string operand of the code.
+	// num is ignored if str is not empty.
+	str string
+
+	// num is a numeric operand of the code.
+	num int
+}
+
+func (w wildcard) compile() []op {
+	return []op{
+		{code: utilities.OpPush},
+	}
+}
+
+func (w deepWildcard) compile() []op {
+	return []op{
+		{code: utilities.OpPushM},
+	}
+}
+
+func (l literal) compile() []op {
+	return []op{
+		{
+			code: utilities.OpLitPush,
+			str:  string(l),
+		},
+	}
+}
+
+func (v variable) compile() []op {
+	var ops []op
+	for _, s := range v.segments {
+		ops = append(ops, s.compile()...)
+	}
+	ops = append(ops, op{
+		code: utilities.OpConcatN,
+		num:  len(v.segments),
+	}, op{
+		code: utilities.OpCapture,
+		str:  v.path,
+	})
+
+	return ops
+}
+
+func (t template) Compile() Template {
+	var rawOps []op
+	for _, s := range t.segments {
+		rawOps = append(rawOps, s.compile()...)
+	}
+
+	var (
+		ops    []int
+		pool   []string
+		fields []string
+	)
+	consts := make(map[string]int)
+	for _, op := range rawOps {
+		ops = append(ops, int(op.code))
+		if op.str == "" {
+			ops = append(ops, op.num)
+		} else {
+			// eof segment literal represents the "/" path pattern
+			if op.str == eof {
+				op.str = ""
+			}
+			if _, ok := consts[op.str]; !ok {
+				consts[op.str] = len(pool)
+				pool = append(pool, op.str)
+			}
+			ops = append(ops, consts[op.str])
+		}
+		if op.code == utilities.OpCapture {
+			fields = append(fields, op.str)
+		}
+	}
+	return Template{
+		Version:  opcodeVersion,
+		OpCodes:  ops,
+		Pool:     pool,
+		Verb:     t.verb,
+		Fields:   fields,
+		Template: t.template,
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
new file mode 100644
index 000000000..138f7c12f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -0,0 +1,11 @@
+// +build gofuzz
+
+package httprule
+
+func Fuzz(data []byte) int {
+	// Per the go-fuzz convention, return 1 to prioritize inputs that parse
+	// successfully and 0 for everything else.
+	_, err := Parse(string(data))
+	if err != nil {
+		return 0
+	}
+	return 1
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
new file mode 100644
index 000000000..5edd784e6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -0,0 +1,368 @@
+package httprule
+
+import (
+	"fmt"
+	"strings"
+)
+
+// InvalidTemplateError indicates that the path template is not valid.
+type InvalidTemplateError struct {
+	tmpl string
+	msg  string
+}
+
+func (e InvalidTemplateError) Error() string {
+	return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
+}
+
+// Parse parses the string representation of path template
+func Parse(tmpl string) (Compiler, error) {
+	if !strings.HasPrefix(tmpl, "/") {
+		return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
+	}
+	tokens, verb := tokenize(tmpl[1:])
+
+	p := parser{tokens: tokens}
+	segs, err := p.topLevelSegments()
+	if err != nil {
+		return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
+	}
+
+	return template{
+		segments: segs,
+		verb:     verb,
+		template: tmpl,
+	}, nil
+}
+
+func tokenize(path string) (tokens []string, verb string) {
+	if path == "" {
+		return []string{eof}, ""
+	}
+
+	const (
+		init = iota
+		field
+		nested
+	)
+	st := init
+	for path != "" {
+		var idx int
+		switch st {
+		case init:
+			idx = strings.IndexAny(path, "/{")
+		case field:
+			idx = strings.IndexAny(path, ".=}")
+		case nested:
+			idx = strings.IndexAny(path, "/}")
+		}
+		if idx < 0 {
+			tokens = append(tokens, path)
+			break
+		}
+		switch r := path[idx]; r {
+		case '/', '.':
+		case '{':
+			st = field
+		case '=':
+			st = nested
+		case '}':
+			st = init
+		}
+		if idx == 0 {
+			tokens = append(tokens, path[idx:idx+1])
+		} else {
+			tokens = append(tokens, path[:idx], path[idx:idx+1])
+		}
+		path = path[idx+1:]
+	}
+
+	l := len(tokens)
+	// See
+	// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
+	// although normal and backwards-compat logic here is to use the last index
+	// of a colon, if the final segment is a variable followed by a colon, the
+	// part following the colon must be a verb. Hence if the previous token is
+	// an end var marker, we switch the index we're looking for to Index instead
+	// of LastIndex, so that we correctly grab the remaining part of the path as
+	// the verb.
+	var penultimateTokenIsEndVar bool
+	switch l {
+	case 0, 1:
+		// Not enough tokens to be a variable, so skip this logic and avoid
+		// computing an invalid index
+	default:
+		penultimateTokenIsEndVar = tokens[l-2] == "}"
+	}
+	t := tokens[l-1]
+	var idx int
+	if penultimateTokenIsEndVar {
+		idx = strings.Index(t, ":")
+	} else {
+		idx = strings.LastIndex(t, ":")
+	}
+	if idx == 0 {
+		tokens, verb = tokens[:l-1], t[1:]
+	} else if idx > 0 {
+		tokens[l-1], verb = t[:idx], t[idx+1:]
+	}
+	tokens = append(tokens, eof)
+	return tokens, verb
+}
+
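+// A hedged illustration of the tokenizer, derived from the state machine
+// above rather than from documented behaviour (the input is Parse's tmpl
+// with the leading "/" already stripped):
+//
+//	tokens, verb := tokenize("v1/{name=shelves/*}:get")
+//	// tokens: ["v1", "/", "{", "name", "=", "shelves", "/", "*", "}", eof]
+//	// verb:   "get"
+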
+// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
+type parser struct {
+	tokens   []string
+	accepted []string
+}
+
+// topLevelSegments is the target of this parser.
+func (p *parser) topLevelSegments() ([]segment, error) {
+	if _, err := p.accept(typeEOF); err == nil {
+		p.tokens = p.tokens[:0]
+		return []segment{literal(eof)}, nil
+	}
+	segs, err := p.segments()
+	if err != nil {
+		return nil, err
+	}
+	if _, err := p.accept(typeEOF); err != nil {
+		return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+	}
+	return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+	s, err := p.segment()
+	if err != nil {
+		return nil, err
+	}
+
+	segs := []segment{s}
+	for {
+		if _, err := p.accept("/"); err != nil {
+			return segs, nil
+		}
+		s, err := p.segment()
+		if err != nil {
+			return segs, err
+		}
+		segs = append(segs, s)
+	}
+}
+
+func (p *parser) segment() (segment, error) {
+	if _, err := p.accept("*"); err == nil {
+		return wildcard{}, nil
+	}
+	if _, err := p.accept("**"); err == nil {
+		return deepWildcard{}, nil
+	}
+	if l, err := p.literal(); err == nil {
+		return l, nil
+	}
+
+	v, err := p.variable()
+	if err != nil {
+		return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
+	}
+	return v, err
+}
+
+func (p *parser) literal() (segment, error) {
+	lit, err := p.accept(typeLiteral)
+	if err != nil {
+		return nil, err
+	}
+	return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+	if _, err := p.accept("{"); err != nil {
+		return nil, err
+	}
+
+	path, err := p.fieldPath()
+	if err != nil {
+		return nil, err
+	}
+
+	var segs []segment
+	if _, err := p.accept("="); err == nil {
+		segs, err = p.segments()
+		if err != nil {
+			return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
+		}
+	} else {
+		segs = []segment{wildcard{}}
+	}
+
+	if _, err := p.accept("}"); err != nil {
+		return nil, fmt.Errorf("unterminated variable segment: %s", path)
+	}
+	return variable{
+		path:     path,
+		segments: segs,
+	}, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+	c, err := p.accept(typeIdent)
+	if err != nil {
+		return "", err
+	}
+	components := []string{c}
+	for {
+		if _, err = p.accept("."); err != nil {
+			return strings.Join(components, "."), nil
+		}
+		c, err := p.accept(typeIdent)
+		if err != nil {
+			return "", fmt.Errorf("invalid field path component: %v", err)
+		}
+		components = append(components, c)
+	}
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of valid values of termType.
+// They improve readability of parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+	typeIdent   = termType("ident")
+	typeLiteral = termType("literal")
+	typeEOF     = termType("$")
+)
+
+const (
+	// eof is the terminal symbol which always appears at the end of token sequence.
+	eof = "\u0000"
+)
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches to the specified "term".
+// If it doesn't match, the function does not consume any tokens and return an error.
+func (p *parser) accept(term termType) (string, error) {
+	t := p.tokens[0]
+	switch term {
+	case "/", "*", "**", ".", "=", "{", "}":
+		if t != string(term) && t != "/" {
+			return "", fmt.Errorf("expected %q but got %q", term, t)
+		}
+	case typeEOF:
+		if t != eof {
+			return "", fmt.Errorf("expected EOF but got %q", t)
+		}
+	case typeIdent:
+		if err := expectIdent(t); err != nil {
+			return "", err
+		}
+	case typeLiteral:
+		if err := expectPChars(t); err != nil {
+			return "", err
+		}
+	default:
+		return "", fmt.Errorf("unknown termType %q", term)
+	}
+	p.tokens = p.tokens[1:]
+	p.accepted = append(p.accepted, t)
+	return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//   pchar         = unreserved / pct-encoded / sub-delims / ":" / "@"
+//   unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
+//   sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
+//                 / "*" / "+" / "," / ";" / "="
+//   pct-encoded   = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+	const (
+		init = iota
+		pct1
+		pct2
+	)
+	st := init
+	for _, r := range t {
+		if st != init {
+			if !isHexDigit(r) {
+				return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+			}
+			switch st {
+			case pct1:
+				st = pct2
+			case pct2:
+				st = init
+			}
+			continue
+		}
+
+		// unreserved
+		switch {
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		}
+		switch r {
+		case '-', '.', '_', '~':
+			// unreserved
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+			// sub-delims
+		case ':', '@':
+			// rest of pchar
+		case '%':
+			// pct-encoded
+			st = pct1
+		default:
+			return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+		}
+	}
+	if st != init {
+		return fmt.Errorf("invalid percent-encoding in %q", t)
+	}
+	return nil
+}
+
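+// For illustration (an assumption read off the state machine above): a
+// well-formed percent-encoded segment passes, while a truncated one fails
+// because the parser never returns to its initial state.
+//
+//	err := expectPChars("a-b_c~%2F") // nil
+//	err = expectPChars("a%2")        // invalid percent-encoding in "a%2"
+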
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
+func expectIdent(ident string) error {
+	if ident == "" {
+		return fmt.Errorf("empty identifier")
+	}
+	for pos, r := range ident {
+		switch {
+		case '0' <= r && r <= '9':
+			if pos == 0 {
+				return fmt.Errorf("identifier starting with digit: %s", ident)
+			}
+			continue
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case r == '_':
+			continue
+		default:
+			return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
+		}
+	}
+	return nil
+}
+
+func isHexDigit(r rune) bool {
+	switch {
+	case '0' <= r && r <= '9':
+		return true
+	case 'A' <= r && r <= 'F':
+		return true
+	case 'a' <= r && r <= 'f':
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
new file mode 100644
index 000000000..5a814a000
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
@@ -0,0 +1,60 @@
+package httprule
+
+import (
+	"fmt"
+	"strings"
+)
+
+type template struct {
+	segments []segment
+	verb     string
+	template string
+}
+
+type segment interface {
+	fmt.Stringer
+	compile() (ops []op)
+}
+
+type wildcard struct{}
+
+type deepWildcard struct{}
+
+type literal string
+
+type variable struct {
+	path     string
+	segments []segment
+}
+
+func (wildcard) String() string {
+	return "*"
+}
+
+func (deepWildcard) String() string {
+	return "**"
+}
+
+func (l literal) String() string {
+	return string(l)
+}
+
+func (v variable) String() string {
+	var segs []string
+	for _, s := range v.segments {
+		segs = append(segs, s.String())
+	}
+	return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
+}
+
+func (t template) String() string {
+	var segs []string
+	for _, s := range t.segments {
+		segs = append(segs, s.String())
+	}
+	str := strings.Join(segs, "/")
+	if t.verb != "" {
+		str = fmt.Sprintf("%s:%s", str, t.verb)
+	}
+	return "/" + str
+}
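+
+// Illustrative round trip (an assumption for clarity, not upstream
+// documentation): parsing "/v1/{name=shelves/*}:get" and calling String on
+// the resulting template yields the original text, since String rejoins
+// segments with "/" and re-attaches the verb after ":".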
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
new file mode 100644
index 000000000..95f867a52
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -0,0 +1,91 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "runtime",
+    srcs = [
+        "context.go",
+        "convert.go",
+        "doc.go",
+        "errors.go",
+        "fieldmask.go",
+        "handler.go",
+        "marshal_httpbodyproto.go",
+        "marshal_json.go",
+        "marshal_jsonpb.go",
+        "marshal_proto.go",
+        "marshaler.go",
+        "marshaler_registry.go",
+        "mux.go",
+        "pattern.go",
+        "proto2_convert.go",
+        "query.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
+    deps = [
+        "//internal/httprule",
+        "//utilities",
+        "@go_googleapis//google/api:httpbody_go_proto",
+        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//grpclog",
+        "@org_golang_google_grpc//metadata",
+        "@org_golang_google_grpc//status",
+        "@org_golang_google_protobuf//encoding/protojson",
+        "@org_golang_google_protobuf//proto",
+        "@org_golang_google_protobuf//reflect/protoreflect",
+        "@org_golang_google_protobuf//reflect/protoregistry",
+        "@org_golang_google_protobuf//types/known/durationpb",
+        "@org_golang_google_protobuf//types/known/timestamppb",
+        "@org_golang_google_protobuf//types/known/wrapperspb",
+    ],
+)
+
+go_test(
+    name = "runtime_test",
+    size = "small",
+    srcs = [
+        "context_test.go",
+        "convert_test.go",
+        "errors_test.go",
+        "fieldmask_test.go",
+        "handler_test.go",
+        "marshal_httpbodyproto_test.go",
+        "marshal_json_test.go",
+        "marshal_jsonpb_test.go",
+        "marshal_proto_test.go",
+        "marshaler_registry_test.go",
+        "mux_test.go",
+        "pattern_test.go",
+        "query_test.go",
+    ],
+    embed = [":runtime"],
+    deps = [
+        "//runtime/internal/examplepb",
+        "//utilities",
+        "@com_github_google_go_cmp//cmp",
+        "@com_github_google_go_cmp//cmp/cmpopts",
+        "@go_googleapis//google/api:httpbody_go_proto",
+        "@go_googleapis//google/rpc:errdetails_go_proto",
+        "@go_googleapis//google/rpc:status_go_proto",
+        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+        "@org_golang_google_grpc//codes",
+        "@org_golang_google_grpc//metadata",
+        "@org_golang_google_grpc//status",
+        "@org_golang_google_protobuf//encoding/protojson",
+        "@org_golang_google_protobuf//proto",
+        "@org_golang_google_protobuf//testing/protocmp",
+        "@org_golang_google_protobuf//types/known/durationpb",
+        "@org_golang_google_protobuf//types/known/emptypb",
+        "@org_golang_google_protobuf//types/known/structpb",
+        "@org_golang_google_protobuf//types/known/timestamppb",
+        "@org_golang_google_protobuf//types/known/wrapperspb",
+    ],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":runtime",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
new file mode 100644
index 000000000..fb57b9366
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -0,0 +1,345 @@
+package runtime
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"net/textproto"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+var (
+	// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+	// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+	DefaultContextTimeout = 0 * time.Second
+)
+
+type (
+	rpcMethodKey       struct{}
+	httpPathPatternKey struct{}
+
+	AnnotateContextOption func(ctx context.Context) context.Context
+)
+
+func WithHTTPPathPattern(pattern string) AnnotateContextOption {
+	return func(ctx context.Context) context.Context {
+		return withHTTPPathPattern(ctx, pattern)
+	}
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
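+// A hedged sketch of the two decode paths above; both padded and unpadded
+// inputs decode to the same bytes:
+//
+//	b1, _ := decodeBinHeader("aGk=") // len%4 == 0, StdEncoding -> []byte("hi")
+//	b2, _ := decodeBinHeader("aGk")  // unpadded, RawStdEncoding -> []byte("hi")
+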
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+	ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+	if err != nil {
+		return nil, err
+	}
+	if md == nil {
+		return ctx, nil
+	}
+
+	return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// AnnotateIncomingContext adds context information such as metadata from the request,
+// attaching the metadata to the returned context as incoming gRPC metadata.
+func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+	ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+	if err != nil {
+		return nil, err
+	}
+	if md == nil {
+		return ctx, nil
+	}
+
+	return metadata.NewIncomingContext(ctx, md), nil
+}
+
+func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
+	ctx = withRPCMethod(ctx, rpcMethodName)
+	for _, o := range options {
+		ctx = o(ctx)
+	}
+	var pairs []string
+	timeout := DefaultContextTimeout
+	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+		var err error
+		timeout, err = timeoutDecode(tm)
+		if err != nil {
+			return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+		}
+	}
+
+	for key, vals := range req.Header {
+		key = textproto.CanonicalMIMEHeaderKey(key)
+		for _, val := range vals {
+			// For backwards-compatibility, pass through 'authorization' header with no prefix.
+			if key == "Authorization" {
+				pairs = append(pairs, "authorization", val)
+			}
+			if h, ok := mux.incomingHeaderMatcher(key); ok {
+				// Handles "-bin" metadata in grpc, since grpc will do another base64
+				// encode before sending to server, we need to decode it first.
+				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+					b, err := decodeBinHeader(val)
+					if err != nil {
+						return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+					}
+
+					val = string(b)
+				}
+				pairs = append(pairs, h, val)
+			}
+		}
+	}
+	if host := req.Header.Get(xForwardedHost); host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+	} else if req.Host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+	}
+
+	if addr := req.RemoteAddr; addr != "" {
+		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+			} else {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+			}
+		}
+	}
+
+	if timeout != 0 {
+		//nolint:govet  // The context outlives this function
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	if len(pairs) == 0 {
+		return ctx, nil, nil
+	}
+	md := metadata.Pairs(pairs...)
+	for _, mda := range mux.metadataAnnotators {
+		md = metadata.Join(md, mda(ctx, req))
+	}
+	return ctx, md, nil
+}
+
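+// Illustration of the header-to-metadata mapping above (an assumed request,
+// and assuming the mux uses the library's default incoming-header matcher):
+// for the inbound headers
+//
+//	Authorization: Bearer x
+//	Grpc-Metadata-Foo: bar
+//
+// the resulting metadata contains "authorization" -> "Bearer x" (the explicit
+// backwards-compatibility pass-through), "grpcgateway-authorization" ->
+// "Bearer x" (the permanent-header rule), and "foo" -> "bar" (the
+// Grpc-Metadata- prefix stripped).
+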
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+	HeaderMD  metadata.MD
+	TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+	return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+	return
+}
+
+// ServerTransportStream implements grpc.ServerTransportStream.
+// It should only be used by the generated files to support grpc.SendHeader
+// outside of gRPC server use.
+type ServerTransportStream struct {
+	mu      sync.Mutex
+	header  metadata.MD
+	trailer metadata.MD
+}
+
+// Method returns the method for the stream.
+func (s *ServerTransportStream) Method() string {
+	return ""
+}
+
+// Header returns the header metadata of the stream.
+func (s *ServerTransportStream) Header() metadata.MD {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.header.Copy()
+}
+
+// SetHeader sets the header metadata.
+func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+
+	s.mu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.mu.Unlock()
+	return nil
+}
+
+// SendHeader sets the header metadata.
+func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
+	return s.SetHeader(md)
+}
+
+// Trailer returns the cached trailer metadata.
+func (s *ServerTransportStream) Trailer() metadata.MD {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.trailer.Copy()
+}
+
+// SetTrailer sets the trailer metadata.
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+
+	s.mu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.mu.Unlock()
+	return nil
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("timeout string is too short: %q", s)
+	}
+	d, ok := timeoutUnitToDuration(s[size-1])
+	if !ok {
+		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
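+// Hedged examples of the decoder above, following the gRPC wire format for
+// the Grpc-Timeout header (value digits followed by a unit letter):
+//
+//	d, err := timeoutDecode("100m") // 100 * time.Millisecond, err == nil
+//	_, err = timeoutDecode("5x")    // error: timeout unit is not recognized
+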
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+	switch u {
+	case 'H':
+		return time.Hour, true
+	case 'M':
+		return time.Minute, true
+	case 'S':
+		return time.Second, true
+	case 'm':
+		return time.Millisecond, true
+	case 'u':
+		return time.Microsecond, true
+	case 'n':
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
+
+// RPCMethod returns the method string for the server context. The returned
+// string is in the format of "/package.service/method".
+func RPCMethod(ctx context.Context) (string, bool) {
+	m := ctx.Value(rpcMethodKey{})
+	if m == nil {
+		return "", false
+	}
+	ms, ok := m.(string)
+	if !ok {
+		return "", false
+	}
+	return ms, true
+}
+
+func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
+	return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
+}
+
+// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+	m := ctx.Value(httpPathPatternKey{})
+	if m == nil {
+		return "", false
+	}
+	ms, ok := m.(string)
+	if !ok {
+		return "", false
+	}
+	return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+	return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 000000000..e6bc4e6ce
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,322 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It exists only for consistency with the other converter functions.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
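+// The remaining slice converters follow the same split-and-convert pattern;
+// for instance (values chosen purely for illustration):
+//
+//	vs, err := BoolSlice("true,false,true", ",")
+//	// vs == []bool{true, false, true}, err == nil
+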
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// It accepts both standard and URL-safe base64 encodings.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val', where individual byte sequences encoded in
+// standard or URL-safe base64 are separated by 'sep', into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC 3339 formatted string into a timestamppb.Timestamp.
+func Timestamp(val string) (*timestamppb.Timestamp, error) {
+	var r timestamppb.Timestamp
+	val = strconv.Quote(strings.Trim(val, `"`))
+	unmarshaler := &protojson.UnmarshalOptions{}
+	err := unmarshaler.Unmarshal([]byte(val), &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Duration converts the given string into a durationpb.Duration.
+func Duration(val string) (*durationpb.Duration, error) {
+	var r durationpb.Duration
+	val = strconv.Quote(strings.Trim(val, `"`))
+	unmarshaler := &protojson.UnmarshalOptions{}
+	err := unmarshaler.Unmarshal([]byte(val), &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
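+// Hedged examples of the protojson-backed parsers above (inputs chosen for
+// illustration): Timestamp accepts an RFC 3339 string with or without
+// surrounding quotes, and Duration accepts protojson duration syntax.
+//
+//	ts, _ := Timestamp("2023-05-09T19:19:48Z") // also fine with quotes already present
+//	d, _ := Duration("1.5s")                   // Seconds: 1, Nanos: 500000000
+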
+// Enum converts the given string into an int32 that should be type-cast to the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
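+// An illustrative sketch with a hypothetical enum map, showing both lookup
+// paths (by name, then by numeric value):
+//
+//	m := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
+//	v, _ := Enum("ACTIVE", m) // 1, matched by name
+//	v, _ = Enum("1", m)       // 1, matched by numeric value
+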
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type-cast to the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+/*
+	Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrapperspb.StringValue, error) {
+	return &wrapperspb.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrapperspb.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return &wrapperspb.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return &wrapperspb.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrapperspb.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return &wrapperspb.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrapperspb.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return &wrapperspb.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return &wrapperspb.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrapperspb.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return &wrapperspb.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return &wrapperspb.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around bytes[] type
+func BytesValue(val string) (*wrapperspb.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return &wrapperspb.BytesValue{Value: parsedVal}, err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
new file mode 100644
index 000000000..b6e5ddf7a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
new file mode 100644
index 000000000..d9e0013c4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -0,0 +1,180 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net/http"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ErrorHandlerFunc is the signature used to configure error handling.
+type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+// StreamErrorHandlerFunc is the signature used to configure stream error handling.
+type StreamErrorHandlerFunc func(context.Context, error) *status.Status
+
+// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
+type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
+
+// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
+// passed to the DefaultRoutingErrorHandler.
+type HTTPStatusError struct {
+	HTTPStatus int
+	Err        error
+}
+
+func (e *HTTPStatusError) Error() string {
+	return e.Err.Error()
+}
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+	switch code {
+	case codes.OK:
+		return http.StatusOK
+	case codes.Canceled:
+		return http.StatusRequestTimeout
+	case codes.Unknown:
+		return http.StatusInternalServerError
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	case codes.DeadlineExceeded:
+		return http.StatusGatewayTimeout
+	case codes.NotFound:
+		return http.StatusNotFound
+	case codes.AlreadyExists:
+		return http.StatusConflict
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	case codes.ResourceExhausted:
+		return http.StatusTooManyRequests
+	case codes.FailedPrecondition:
+		// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+		return http.StatusBadRequest
+	case codes.Aborted:
+		return http.StatusConflict
+	case codes.OutOfRange:
+		return http.StatusBadRequest
+	case codes.Unimplemented:
+		return http.StatusNotImplemented
+	case codes.Internal:
+		return http.StatusInternalServerError
+	case codes.Unavailable:
+		return http.StatusServiceUnavailable
+	case codes.DataLoss:
+		return http.StatusInternalServerError
+	}
+
+	grpclog.Infof("Unknown gRPC error code: %v", code)
+	return http.StatusInternalServerError
+}
+
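+// A few sample mappings, for illustration:
+//
+//	HTTPStatusFromCode(codes.NotFound)      // 404 http.StatusNotFound
+//	HTTPStatusFromCode(codes.AlreadyExists) // 409 http.StatusConflict
+//	HTTPStatusFromCode(codes.Code(999))     // 500, logged as an unknown code
+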
+// HTTPError uses the mux-configured error handler.
+func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	mux.errorHandler(ctx, mux, marshaler, w, r, err)
+}
+
+// DefaultHTTPErrorHandler is the default error handler.
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
+// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
+// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
+// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
+// are insufficient for.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body written by this function is a Status message marshaled by the Marshaler.
+func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	// return Internal when Marshal failed
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	var customStatus *HTTPStatusError
+	if errors.As(err, &customStatus) {
+		err = customStatus.Err
+	}
+
+	s := status.Convert(err)
+	pb := s.Proto()
+
+	w.Header().Del("Trailer")
+	w.Header().Del("Transfer-Encoding")
+
+	contentType := marshaler.ContentType(pb)
+	w.Header().Set("Content-Type", contentType)
+
+	if s.Code() == codes.Unauthenticated {
+		w.Header().Set("WWW-Authenticate", s.Message())
+	}
+
+	buf, merr := marshaler.Marshal(pb)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+	// Unless the request includes a TE header field indicating "trailers"
+	// is acceptable, as described in Section 4.3, a server SHOULD NOT
+	// generate trailer fields that it believes are necessary for the user
+	// agent to receive.
+	doForwardTrailers := requestAcceptsTrailers(r)
+
+	if doForwardTrailers {
+		handleForwardResponseTrailerHeader(w, md)
+		w.Header().Set("Transfer-Encoding", "chunked")
+	}
+
+	st := HTTPStatusFromCode(s.Code())
+	if customStatus != nil {
+		st = customStatus.HTTPStatus
+	}
+
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	if doForwardTrailers {
+		handleForwardResponseTrailer(w, md)
+	}
+}
+
+func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
+	return status.Convert(err)
+}
+
+// DefaultRoutingErrorHandler is our default handler for routing errors.
+// By default, HTTP status codes are mapped onto the following gRPC error codes:
+//   NotFound -> grpc.NotFound
+//   StatusBadRequest -> grpc.InvalidArgument
+//   MethodNotAllowed -> grpc.Unimplemented
+//   Other -> grpc.Internal; the method is not expected to be called for anything else
+func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
+	sterr := status.Error(codes.Internal, "Unexpected routing error")
+	switch httpStatus {
+	case http.StatusBadRequest:
+		sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
+	case http.StatusMethodNotAllowed:
+		sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
+	case http.StatusNotFound:
+		sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
+	}
+	mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
new file mode 100644
index 000000000..0138ed2f7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -0,0 +1,165 @@
+package runtime
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+
+	"google.golang.org/genproto/protobuf/field_mask"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
+	fd := fields.ByName(protoreflect.Name(name))
+	if fd != nil {
+		return fd
+	}
+
+	return fields.ByJSONName(name)
+}
+
+// FieldMaskFromRequestBody creates a FieldMask containing all complete field paths found in the JSON request body.
+func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
+	fm := &field_mask.FieldMask{}
+	var root interface{}
+
+	if err := json.NewDecoder(r).Decode(&root); err != nil {
+		if err == io.EOF {
+			return fm, nil
+		}
+		return nil, err
+	}
+
+	queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
+	for len(queue) > 0 {
+		// dequeue an item
+		item := queue[0]
+		queue = queue[1:]
+
+		m, ok := item.node.(map[string]interface{})
+		switch {
+		case ok:
+			// if the item is an object, then enqueue all of its children
+			for k, v := range m {
+				if item.msg == nil {
+					return nil, fmt.Errorf("JSON structure did not match request type")
+				}
+
+				fd := getFieldByName(item.msg.Descriptor().Fields(), k)
+				if fd == nil {
+					return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
+				}
+
+				if isDynamicProtoMessage(fd.Message()) {
+					for _, p := range buildPathsBlindly(k, v) {
+						newPath := p
+						if item.path != "" {
+							newPath = item.path + "." + newPath
+						}
+						queue = append(queue, fieldMaskPathItem{path: newPath})
+					}
+					continue
+				}
+
+				if isProtobufAnyMessage(fd.Message()) {
+					_, hasTypeField := v.(map[string]interface{})["@type"]
+					if hasTypeField {
+						queue = append(queue, fieldMaskPathItem{path: k})
+						continue
+					} else {
+						return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
+					}
+
+				}
+
+				child := fieldMaskPathItem{
+					node: v,
+				}
+				if item.path == "" {
+					child.path = string(fd.FullName().Name())
+				} else {
+					child.path = item.path + "." + string(fd.FullName().Name())
+				}
+
+				switch {
+				case fd.IsList(), fd.IsMap():
+					// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
+					// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
+					fm.Paths = append(fm.Paths, child.path)
+				case fd.Message() != nil:
+					child.msg = item.msg.Get(fd).Message()
+					fallthrough
+				default:
+					queue = append(queue, child)
+				}
+			}
+		case len(item.path) > 0:
+			// otherwise, it's a leaf node, so record its path
+			fm.Paths = append(fm.Paths, item.path)
+		}
+	}
+
+	// Sort for deterministic output in the presence
+	// of repeated fields.
+	sort.Strings(fm.Paths)
+
+	return fm, nil
+}
+
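+// A hedged sketch of the resulting mask (the message shape here is a
+// hypothetical request type with fields "name" and "author.id"): the body
+// {"name": "x", "author": {"id": 1}} yields the sorted paths
+// ["author.id", "name"].
+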
+func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
+	return md != nil && (md.FullName() == "google.protobuf.Any")
+}
+
+func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
+	return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
+}
+
+// buildPathsBlindly does not attempt to match proto field names to the
+// json value keys. Instead it relies completely on the structure of
+// the unmarshalled json contained within the `in` value.
+// It returns a slice containing all subpaths, rooted at the
+// passed-in name and json value.
+func buildPathsBlindly(name string, in interface{}) []string {
+	m, ok := in.(map[string]interface{})
+	if !ok {
+		return []string{name}
+	}
+
+	var paths []string
+	queue := []fieldMaskPathItem{{path: name, node: m}}
+	for len(queue) > 0 {
+		cur := queue[0]
+		queue = queue[1:]
+
+		m, ok := cur.node.(map[string]interface{})
+		if !ok {
+			// This should never happen since we should always check that we only add
+			// nodes of type map[string]interface{} to the queue.
+			continue
+		}
+		for k, v := range m {
+			if mi, ok := v.(map[string]interface{}); ok {
+				queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
+			} else {
+				// This is not a struct, so there are no more levels to descend.
+				curPath := cur.path + "." + k
+				paths = append(paths, curPath)
+			}
+		}
+	}
+	return paths
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+	// the list of prior fields leading up to node connected by dots
+	path string
+
+	// a generic decoded json object for the current item, inspected for further path extraction
+	node interface{}
+
+	// parent message
+	msg protoreflect.Message
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
new file mode 100644
index 000000000..d1e21df48
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -0,0 +1,223 @@
+package runtime
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/textproto"
+	"strings"
+
+	"google.golang.org/genproto/googleapis/api/httpbody"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		grpclog.Infof("Flush not supported in %T", w)
+		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+		http.Error(w, "unexpected error", http.StatusInternalServerError)
+		return
+	}
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	w.Header().Set("Transfer-Encoding", "chunked")
+	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	var delimiter []byte
+	if d, ok := marshaler.(Delimited); ok {
+		delimiter = d.Delimiter()
+	} else {
+		delimiter = []byte("\n")
+	}
+
+	var wroteHeader bool
+	for {
+		resp, err := recv()
+		if err == io.EOF {
+			return
+		}
+		if err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+
+		if !wroteHeader {
+			w.Header().Set("Content-Type", marshaler.ContentType(resp))
+		}
+
+		var buf []byte
+		httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
+		switch {
+		case resp == nil:
+			buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
+		case isHTTPBody:
+			buf = httpBody.GetData()
+		default:
+			result := map[string]interface{}{"result": resp}
+			if rb, ok := resp.(responseBody); ok {
+				result["result"] = rb.XXX_ResponseBody()
+			}
+
+			buf, err = marshaler.Marshal(result)
+		}
+
+		if err != nil {
+			grpclog.Infof("Failed to marshal response chunk: %v", err)
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+		if _, err = w.Write(buf); err != nil {
+			grpclog.Infof("Failed to send response chunk: %v", err)
+			return
+		}
+		wroteHeader = true
+		if _, err = w.Write(delimiter); err != nil {
+			grpclog.Infof("Failed to send delimiter chunk: %v", err)
+			return
+		}
+		f.Flush()
+	}
+}
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.HeaderMD {
+		if h, ok := mux.outgoingHeaderMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+	for k := range md.TrailerMD {
+		tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
+		w.Header().Add("Trailer", tKey)
+	}
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+	for k, vs := range md.TrailerMD {
+		tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
+		for _, v := range vs {
+			w.Header().Add(tKey, v)
+		}
+	}
+}
+
+// responseBody is an interface with a method for getting the field to marshal to the response body.
+// The method is generated for a response struct from the value of `response_body` in the `google.api.HttpRule`.
+type responseBody interface {
+	XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+	// Unless the request includes a TE header field indicating "trailers"
+	// is acceptable, as described in Section 4.3, a server SHOULD NOT
+	// generate trailer fields that it believes are necessary for the user
+	// agent to receive.
+	doForwardTrailers := requestAcceptsTrailers(req)
+
+	if doForwardTrailers {
+		handleForwardResponseTrailerHeader(w, md)
+		w.Header().Set("Transfer-Encoding", "chunked")
+	}
+
+	contentType := marshaler.ContentType(resp)
+	w.Header().Set("Content-Type", contentType)
+
+	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+	var buf []byte
+	var err error
+	if rb, ok := resp.(responseBody); ok {
+		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+	} else {
+		buf, err = marshaler.Marshal(resp)
+	}
+	if err != nil {
+		grpclog.Infof("Marshal error: %v", err)
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	if _, err = w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	if doForwardTrailers {
+		handleForwardResponseTrailer(w, md)
+	}
+}
+
+func requestAcceptsTrailers(req *http.Request) bool {
+	te := req.Header.Get("TE")
+	return strings.Contains(strings.ToLower(te), "trailers")
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+	if len(opts) == 0 {
+		return nil
+	}
+	for _, opt := range opts {
+		if err := opt(ctx, w, resp); err != nil {
+			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
+	st := mux.streamErrorHandler(ctx, err)
+	msg := errorChunk(st)
+	if !wroteHeader {
+		w.Header().Set("Content-Type", marshaler.ContentType(msg))
+		w.WriteHeader(HTTPStatusFromCode(st.Code()))
+	}
+	buf, merr := marshaler.Marshal(msg)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal an error: %v", merr)
+		return
+	}
+	if _, werr := w.Write(buf); werr != nil {
+		grpclog.Infof("Failed to notify error to client: %v", werr)
+		return
+	}
+}
+
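+// errorChunk wraps a gRPC status as the {"error": ...} object emitted as the
+// final frame of a failed stream, mirroring the {"result": ...} framing of
+// successful chunks (see ForwardResponseStream above).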
+func errorChunk(st *status.Status) map[string]proto.Message {
+	return map[string]proto.Message{"error": st.Proto()}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
new file mode 100644
index 000000000..b86135c88
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,32 @@
+package runtime
+
+import (
+	"google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fall back to the Marshaler specified as its default Marshaler.
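+//
+// A minimal usage sketch (illustrative only):
+//
+//	m := &HTTPBodyMarshaler{Marshaler: &JSONPb{}}
+//	// m.Marshal(body) returns the raw Data bytes verbatim when body is a
+//	// *httpbody.HttpBody, and falls back to JSONPb for other messages.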
+type HTTPBodyMarshaler struct {
+	Marshaler
+}
+
+// ContentType returns its specified content type in case v is a
+// google.api.HttpBody message, otherwise it will fall back to the default
+// Marshaler's content type.
+func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetContentType()
+	}
+	return h.Marshaler.ContentType(v)
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.Data, nil
+	}
+	return h.Marshaler.Marshal(v)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
new file mode 100644
index 000000000..d6aa82578
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Golang.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. maps, oneofs, etc.
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType(_ interface{}) string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
new file mode 100644
index 000000000..7387c8e39
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -0,0 +1,344 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
+// Unlike JSONBuiltin, it supports the full functionality of protobuf.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb struct {
+	protojson.MarshalOptions
+	protojson.UnmarshalOptions
+}
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType(_ interface{}) string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	b, err := j.MarshalOptions.Marshal(p)
+	if err != nil {
+		return err
+	}
+
+	_, err = w.Write(b)
+	return err
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+	if v == nil {
+		return []byte("null"), nil
+	}
+	rv := reflect.ValueOf(v)
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return []byte("null"), nil
+		}
+		rv = rv.Elem()
+	}
+
+	if rv.Kind() == reflect.Slice {
+		if rv.IsNil() {
+			if j.EmitUnpopulated {
+				return []byte("[]"), nil
+			}
+			return []byte("null"), nil
+		}
+
+		if rv.Type().Elem().Implements(protoMessageType) {
+			var buf bytes.Buffer
+			err := buf.WriteByte('[')
+			if err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					err = buf.WriteByte(',')
+					if err != nil {
+						return nil, err
+					}
+				}
+				if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+					return nil, err
+				}
+			}
+			err = buf.WriteByte(']')
+			if err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+
+		if rv.Type().Elem().Implements(typeProtoEnum) {
+			var buf bytes.Buffer
+			err := buf.WriteByte('[')
+			if err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					err = buf.WriteByte(',')
+					if err != nil {
+						return nil, err
+					}
+				}
+				if j.UseEnumNumbers {
+					_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
+				} else {
+					_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
+				}
+				if err != nil {
+					return nil, err
+				}
+			}
+			err = buf.WriteByte(']')
+			if err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+	}
+
+	if rv.Kind() == reflect.Map {
+		m := make(map[string]*json.RawMessage)
+		for _, k := range rv.MapKeys() {
+			buf, err := j.Marshal(rv.MapIndex(k).Interface())
+			if err != nil {
+				return nil, err
+			}
+			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+		}
+		if j.Indent != "" {
+			return json.MarshalIndent(m, "", j.Indent)
+		}
+		return json.Marshal(m)
+	}
+	if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
+		return json.Marshal(enum.String())
+	}
+	return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+	return unmarshalJSONPb(data, j.UnmarshalOptions, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+	d := json.NewDecoder(r)
+	return DecoderWrapper{
+		Decoder:          d,
+		UnmarshalOptions: j.UnmarshalOptions,
+	}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+	*json.Decoder
+	protojson.UnmarshalOptions
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using the configured protojson.UnmarshalOptions.
+func (d DecoderWrapper) Decode(v interface{}) error {
+	return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+	return EncoderFunc(func(v interface{}) error {
+		if err := j.marshalTo(w, v); err != nil {
+			return err
+		}
+		// mimic json.Encoder by adding a newline (makes output
+		// easier to read when it contains multiple encoded items)
+		_, err := w.Write(j.Delimiter())
+		return err
+	})
+}
+
+func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+	d := json.NewDecoder(bytes.NewReader(data))
+	return decodeJSONPb(d, unmarshaler, v)
+}
+
+func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		return decodeNonProtoField(d, unmarshaler, v)
+	}
+
+	// Decode into bytes for marshalling
+	var b json.RawMessage
+	err := d.Decode(&b)
+	if err != nil {
+		return err
+	}
+
+	return unmarshaler.Unmarshal([]byte(b), p)
+}
+
+func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", v)
+	}
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		if rv.Type().ConvertibleTo(typeProtoMessage) {
+			// Decode into bytes for marshalling
+			var b json.RawMessage
+			err := d.Decode(&b)
+			if err != nil {
+				return err
+			}
+
+			return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
+		}
+		rv = rv.Elem()
+	}
+	if rv.Kind() == reflect.Map {
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rv.Type()))
+		}
+		conv, ok := convFromType[rv.Type().Key().Kind()]
+		if !ok {
+			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+		}
+
+		m := make(map[string]*json.RawMessage)
+		if err := d.Decode(&m); err != nil {
+			return err
+		}
+		for k, v := range m {
+			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+			if err := result[1].Interface(); err != nil {
+				return err.(error)
+			}
+			bk := result[0]
+			bv := reflect.New(rv.Type().Elem())
+			if v == nil {
+				null := json.RawMessage("null")
+				v = &null
+			}
+			if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
+				return err
+			}
+			rv.SetMapIndex(bk, bv.Elem())
+		}
+		return nil
+	}
+	if rv.Kind() == reflect.Slice {
+		var sl []json.RawMessage
+		if err := d.Decode(&sl); err != nil {
+			return err
+		}
+		if sl != nil {
+			rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
+		}
+		for _, item := range sl {
+			bv := reflect.New(rv.Type().Elem())
+			if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
+				return err
+			}
+			rv.Set(reflect.Append(rv, bv.Elem()))
+		}
+		return nil
+	}
+	if _, ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch v := repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
+
+var (
+	convFromType = map[reflect.Kind]reflect.Value{
+		reflect.String:  reflect.ValueOf(String),
+		reflect.Bool:    reflect.ValueOf(Bool),
+		reflect.Float64: reflect.ValueOf(Float64),
+		reflect.Float32: reflect.ValueOf(Float32),
+		reflect.Int64:   reflect.ValueOf(Int64),
+		reflect.Int32:   reflect.ValueOf(Int32),
+		reflect.Uint64:  reflect.ValueOf(Uint64),
+		reflect.Uint32:  reflect.ValueOf(Uint32),
+		reflect.Slice:   reflect.ValueOf(Bytes),
+	}
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 000000000..007f8f1a2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,63 @@
+package runtime
+
+import (
+	"errors"
+	"io"
+	"io/ioutil"
+
+	"google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		_, err = writer.Write(buffer)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 000000000..2c0d25ff4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	// The parameter describes the type which is being marshalled, which can sometimes
+	// affect the content type returned.
+	ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder.
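+//
+// For instance (illustrative), a function literal can be used wherever an
+// Encoder is expected:
+//
+//	var _ Encoder = EncoderFunc(func(v interface{}) error { return nil })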
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 000000000..a714de024
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+	"errors"
+	"mime"
+	"net/http"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{
+			MarshalOptions: protojson.MarshalOptions{
+				EmitUnpopulated: true,
+			},
+			UnmarshalOptions: protojson.UnmarshalOptions{
+				DiscardUnknown: true,
+			},
+		},
+	}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME types set by the Accept
+// header (outbound) and the Content-Type header (inbound), choosing the first
+// value of each that exactly matches a registered MIME type.
+// If no inbound match is found, the marshaler registered for "*" is used;
+// if no outbound match is found, the outbound marshaler falls back to the
+// inbound one.
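+//
+// For example (illustrative): with the default registry, which only contains
+// the "*" entry, a request with "Content-Type: application/json" resolves
+// both inbound and outbound to the wildcard marshaler.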
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		contentType, _, err := mime.ParseMediaType(contentTypeVal)
+		if err != nil {
+			grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+			continue
+		}
+		if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
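+//
+// Usage sketch (illustrative):
+//
+//	mux := NewServeMux(
+//		WithMarshalerOption("application/x-protobuf", &ProtoMarshaller{}),
+//	)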
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 000000000..46a4aabaf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,356 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/textproto"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+	// UnescapingModeLegacy is the default V2 behavior, which unescapes the entire
+	// path string before doing any routing.
+	UnescapingModeLegacy UnescapingMode = iota
+
+	// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+	// reserved characters.
+	UnescapingModeAllExceptReserved
+
+	// UnescapingModeAllExceptSlash unescapes URL path parameters except path
+	// separators, which will be left as "%2F".
+	UnescapingModeAllExceptSlash
+
+	// UnescapingModeAllCharacters unescapes all URL path parameters;
+	// they will be fully decoded.
+	UnescapingModeAllCharacters
+
+	// UnescapingModeDefault is the default escaping type.
+	// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+	// reference implementation
+	UnescapingModeDefault = UnescapingModeLegacy
+)
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers                  map[string][]handler
+	forwardResponseOptions    []func(context.Context, http.ResponseWriter, proto.Message) error
+	marshalers                marshalerRegistry
+	incomingHeaderMatcher     HeaderMatcherFunc
+	outgoingHeaderMatcher     HeaderMatcherFunc
+	metadataAnnotators        []func(context.Context, *http.Request) metadata.MD
+	errorHandler              ErrorHandlerFunc
+	streamErrorHandler        StreamErrorHandlerFunc
+	routingErrorHandler       RoutingErrorHandlerFunc
+	disablePathLengthFallback bool
+	unescapingMode            UnescapingMode
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// WithUnescapingMode sets the unescaping mode. See the definitions of UnescapingMode
+// for more information.
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.unescapingMode = mode
+	}
+}
+
+// SetQueryParameterParser sets the query parameter parser, used to populate a message from query parameters.
+// Note that the parser is package-global: configuring it on one ServeMux affects all muxes.
+// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		currentQueryParser = queryParameterParser
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
+// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
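+//
+// For illustration:
+//
+//	"Authorization"     -> "grpcgateway-Authorization", true
+//	"Grpc-Metadata-Foo" -> "Foo", true
+//	"X-Custom-Header"   -> "", false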
+func DefaultHeaderMatcher(key string) (string, bool) {
+	key = textproto.CanonicalMIMEHeaderKey(key)
+	if isPermanentHTTPHeader(key) {
+		return MetadataPrefix + key, true
+	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
+//
+// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading token from cookie and adding it in gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.errorHandler = fn
+	}
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.streamErrorHandler = fn
+	}
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// The handler is called for errors which can happen before a gRPC route is selected or executed,
+// with one of the following status codes: StatusMethodNotAllowed, StatusNotFound, StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.routingErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+	serveMux := &ServeMux{
+		handlers:               make(map[string][]handler),
+		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		marshalers:             makeMarshalerMIMERegistry(),
+		errorHandler:           DefaultHTTPErrorHandler,
+		streamErrorHandler:     DefaultStreamErrorHandler,
+		routingErrorHandler:    DefaultRoutingErrorHandler,
+		unescapingMode:         UnescapingModeDefault,
+	}
+
+	for _, opt := range opts {
+		opt(serveMux)
+	}
+
+	if serveMux.incomingHeaderMatcher == nil {
+		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+	}
+
+	if serveMux.outgoingHeaderMatcher == nil {
+		serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
+			return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+		}
+	}
+
+	return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
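+// Handlers are prepended, so the handler registered most recently for a given
+// method is matched first.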
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
+}
+
+// HandlePath allows users to configure custom path handlers.
+// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
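+//
+// Usage sketch (hypothetical handler):
+//
+//	err := mux.HandlePath("GET", "/healthz", func(w http.ResponseWriter, r *http.Request, _ map[string]string) {
+//		w.WriteHeader(http.StatusOK)
+//	})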
+func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
+	compiler, err := httprule.Parse(pathPattern)
+	if err != nil {
+		return fmt.Errorf("parsing path pattern: %w", err)
+	}
+	tp := compiler.Compile()
+	pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
+	if err != nil {
+		return fmt.Errorf("creating new pattern: %w", err)
+	}
+	s.Handle(meth, pattern, h)
+	return nil
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
+		return
+	}
+
+	// TODO(v3): remove UnescapingModeLegacy
+	if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
+		path = r.URL.RawPath
+	}
+
+	components := strings.Split(path[1:], "/")
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+		r.Method = strings.ToUpper(override)
+		if err := r.ParseForm(); err != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, err.Error())
+			s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			return
+		}
+	}
+
+	// Verb is declared out here so it can be memoized for the fallback case below
+	var verb string
+
+	for _, h := range s.handlers[r.Method] {
+		// If the pattern has a verb, explicitly look for a suffix in the last
+		// component that matches a colon plus the verb. This allows us to
+		// handle some cases that otherwise can't be correctly handled by the
+		// former LastIndex case, such as when the verb literal itself contains
+		// a colon. This should work for all cases that have run through the
+		// parser because we know what verb we're looking for, however, there
+		// are still some cases that the parser itself cannot disambiguate. See
+		// the comment there if interested.
+		patVerb := h.pat.Verb()
+		l := len(components)
+		lastComponent := components[l-1]
+		idx := -1
+		if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
+			idx = len(lastComponent) - len(patVerb) - 1
+		}
+		if idx == 0 {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+			return
+		}
+		if idx > 0 {
+			components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
+		}
+
+		pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
+		if err != nil {
+			var mse MalformedSequenceError
+			if ok := errors.As(err, &mse); ok {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+					HTTPStatus: http.StatusBadRequest,
+					Err:        mse,
+				})
+			}
+			continue
+		}
+		h.h(w, r, pathParams)
+		return
+	}
+
+	// Look up other methods to handle fallback from GET to POST and
+	// to determine if it is MethodNotAllowed or NotFound.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
+			if err != nil {
+				var mse MalformedSequenceError
+				if ok := errors.As(err, &mse); ok {
+					_, outboundMarshaler := MarshalerForRequest(s, r)
+					s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+						HTTPStatus: http.StatusBadRequest,
+						Err:        mse,
+					})
+				}
+				continue
+			}
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			if s.isPathLengthFallback(r) {
+				if err := r.ParseForm(); err != nil {
+					_, outboundMarshaler := MarshalerForRequest(s, r)
+					sterr := status.Error(codes.InvalidArgument, err.Error())
+					s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					return
+				}
+				h.h(w, r, pathParams)
+				return
+			}
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+			return
+		}
+	}
+
+	_, outboundMarshaler := MarshalerForRequest(s, r)
+	s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+	return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+	return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+	pat Pattern
+	h   HandlerFunc
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 000000000..df7cb8142
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,383 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+	ErrNotMatch = errors.New("not match to the path pattern")
+	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+	ErrInvalidPattern = errors.New("invalid pattern")
+	// ErrMalformedSequence indicates that an escape sequence was malformed.
+	ErrMalformedSequence = errors.New("malformed escape sequence")
+)
+
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+	return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+	code    utilities.OpCode
+	operand int
+}
+
+// Pattern is a template pattern of http request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
+type Pattern struct {
+	// ops is a list of operations
+	ops []op
+	// pool is a constant pool indexed by the operands or vars.
+	pool []string
+	// vars is a list of variables names to be bound by this pattern
+	vars []string
+	// stacksize is the max depth of the stack
+	stacksize int
+	// tailLen is the length of the fixed-size segments after a deep wildcard
+	tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+	verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+	if version != 1 {
+		grpclog.Infof("unsupported version: %d", version)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	l := len(ops)
+	if l%2 != 0 {
+		grpclog.Infof("odd number of ops codes: %d", l)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	var (
+		typedOps        []op
+		stack, maxstack int
+		tailLen         int
+		pushMSeen       bool
+		vars            []string
+	)
+	for i := 0; i < l; i += 2 {
+		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpPushM:
+			if pushMSeen {
+				grpclog.Infof("pushM appears twice")
+				return Pattern{}, ErrInvalidPattern
+			}
+			pushMSeen = true
+			stack++
+		case utilities.OpLitPush:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("negative literal index: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpConcatN:
+			if op.operand <= 0 {
+				grpclog.Infof("negative concat size: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack -= op.operand
+			if stack < 0 {
+				grpclog.Info("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack++
+		case utilities.OpCapture:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("variable name index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			v := pool[op.operand]
+			op.operand = len(vars)
+			vars = append(vars, v)
+			stack--
+			if stack < 0 {
+				grpclog.Infof("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+		default:
+			grpclog.Infof("invalid opcode: %d", op.code)
+			return Pattern{}, ErrInvalidPattern
+		}
+
+		if maxstack < stack {
+			maxstack = stack
+		}
+		typedOps = append(typedOps, op)
+	}
+	return Pattern{
+		ops:       typedOps,
+		pool:      pool,
+		vars:      vars,
+		stacksize: maxstack,
+		tailLen:   tailLen,
+		verb:      verb,
+	}, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
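+//
+// Usage sketch (hypothetical pattern matching "/healthz"):
+//
+//	var pat = MustPattern(NewPattern(1, []int{int(utilities.OpLitPush), 0}, []string{"healthz"}, ""))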
+func MustPattern(p Pattern, err error) Pattern {
+	if err != nil {
+		grpclog.Fatalf("Pattern initialization failed: %v", err)
+	}
+	return p
+}
+
+// MatchAndEscape examines components to determine if they match a Pattern.
+// MatchAndEscape will return an error if no Patterns matched or if a pattern
+// matched but contained malformed escape sequences. If successful, the function
+// returns a mapping from field paths to their captured values.
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
+	if p.verb != verb {
+		if p.verb != "" {
+			return nil, ErrNotMatch
+		}
+		if len(components) == 0 {
+			components = []string{":" + verb}
+		} else {
+			components = append([]string{}, components...)
+			components[len(components)-1] += ":" + verb
+		}
+	}
+
+	var pos int
+	stack := make([]string, 0, p.stacksize)
+	captured := make([]string, len(p.vars))
+	l := len(components)
+	for _, op := range p.ops {
+		var err error
+
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush, utilities.OpLitPush:
+			if pos >= l {
+				return nil, ErrNotMatch
+			}
+			c := components[pos]
+			if op.code == utilities.OpLitPush {
+				if lit := p.pool[op.operand]; c != lit {
+					return nil, ErrNotMatch
+				}
+			} else if op.code == utilities.OpPush {
+				if c, err = unescape(c, unescapingMode, false); err != nil {
+					return nil, err
+				}
+			}
+			stack = append(stack, c)
+			pos++
+		case utilities.OpPushM:
+			end := len(components)
+			if end < pos+p.tailLen {
+				return nil, ErrNotMatch
+			}
+			end -= p.tailLen
+			c := strings.Join(components[pos:end], "/")
+			if c, err = unescape(c, unescapingMode, true); err != nil {
+				return nil, err
+			}
+			stack = append(stack, c)
+			pos = end
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			captured[op.operand] = stack[n]
+			stack = stack[:n]
+		}
+	}
+	if pos < l {
+		return nil, ErrNotMatch
+	}
+	bindings := make(map[string]string)
+	for i, val := range captured {
+		bindings[p.vars[i]] = val
+	}
+	return bindings, nil
+}
+
+// Match examines components to determine if they match a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+	return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+	var stack []string
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			stack = append(stack, "*")
+		case utilities.OpLitPush:
+			stack = append(stack, p.pool[op.operand])
+		case utilities.OpPushM:
+			stack = append(stack, "**")
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+		}
+	}
+	segs := strings.Join(stack, "/")
+	if p.verb != "" {
+		return fmt.Sprintf("/%s:%s", segs, p.verb)
+	}
+	return "/" + segs
+}
+
+/*
+ * The following code is adopted and modified from Go's standard library
+ * and carries the attached license.
+ *
+ *     Copyright 2009 The Go Authors. All rights reserved.
+ *     Use of this source code is governed by a BSD-style
+ *     license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character
+func ishex(c byte) bool {
+	switch {
+	case '0' <= c && c <= '9':
+		return true
+	case 'a' <= c && c <= 'f':
+		return true
+	case 'A' <= c && c <= 'F':
+		return true
+	}
+	return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+	switch c {
+	case '!', '#', '$', '&', '\'', '(', ')', '*',
+		'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+		return true
+	default:
+		return false
+	}
+}
+
+// unhex converts a hex character to its 4-bit numeric value
+func unhex(c byte) byte {
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0'
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10
+	}
+	return 0
+}
+
+// shouldUnescapeWithMode returns true if the character should be unescaped
+// under the given mode
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+	switch mode {
+	case UnescapingModeAllExceptReserved:
+		if isRFC6570Reserved(c) {
+			return false
+		}
+	case UnescapingModeAllExceptSlash:
+		if c == '/' {
+			return false
+		}
+	case UnescapingModeAllCharacters:
+		return true
+	}
+	return true
+}
+
+// unescape unescapes a path string using the provided mode
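+//
+// For example (illustrative): unescaping "foo%2Fbar" in multi-segment mode
+// yields "foo%2Fbar" under UnescapingModeAllExceptSlash (the encoded slash is
+// preserved) and "foo/bar" under UnescapingModeAllCharacters.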
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+	// TODO(v3): remove UnescapingModeLegacy
+	if mode == UnescapingModeLegacy {
+		return s, nil
+	}
+
+	if !multisegment {
+		mode = UnescapingModeAllCharacters
+	}
+
+	// Count %, check that they're well-formed.
+	n := 0
+	for i := 0; i < len(s); {
+		if s[i] == '%' {
+			n++
+			if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+				s = s[i:]
+				if len(s) > 3 {
+					s = s[:3]
+				}
+
+				return "", MalformedSequenceError(s)
+			}
+			i += 3
+		} else {
+			i++
+		}
+	}
+
+	if n == 0 {
+		return s, nil
+	}
+
+	var t strings.Builder
+	t.Grow(len(s))
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '%':
+			c := unhex(s[i+1])<<4 | unhex(s[i+2])
+			if shouldUnescapeWithMode(c, mode) {
+				t.WriteByte(c)
+				i += 2
+				continue
+			}
+			fallthrough
+		default:
+			t.WriteByte(s[i])
+		}
+	}
+
+	return t.String(), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 000000000..d549407f2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+	"google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+	return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+	b, err := Bool(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+	f, err := Float64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 000000000..fb0c84ef0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,329 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/genproto/protobuf/field_mask"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &defaultQueryParser{}
+
+// QueryParameterParser defines the interface for all query parameter parsers
+type QueryParameterParser interface {
+	Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using the current query parser.
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	return currentQueryParser.Parse(msg, values, filter)
+}
+
+type defaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		match := valuesKeyRegexp.FindStringSubmatch(key)
+		if len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+		}
+		fieldPath := strings.Split(key, ".")
+		if filter.HasCommonPrefix(fieldPath) {
+			continue
+		}
+		if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+	fieldPath := strings.Split(fieldPathString, ".")
+	return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
+	if len(fieldPath) < 1 {
+		return errors.New("no field path")
+	}
+	if len(values) < 1 {
+		return errors.New("no value provided")
+	}
+
+	var fieldDescriptor protoreflect.FieldDescriptor
+	for i, fieldName := range fieldPath {
+		fields := msgValue.Descriptor().Fields()
+
+		// Get field by name
+		fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
+		if fieldDescriptor == nil {
+			fieldDescriptor = fields.ByJSONName(fieldName)
+			if fieldDescriptor == nil {
+				// We're not returning an error here because this could just be
+				// an extra query parameter that isn't part of the request.
+				grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
+				return nil
+			}
+		}
+
+		// If this is the last element, we're done
+		if i == len(fieldPath)-1 {
+			break
+		}
+
+		// Only singular message fields are allowed
+		if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
+			return fmt.Errorf("invalid path: %q is not a message", fieldName)
+		}
+
+		// Get the nested message
+		msgValue = msgValue.Mutable(fieldDescriptor).Message()
+	}
+
+	// Check if oneof already set
+	if of := fieldDescriptor.ContainingOneof(); of != nil {
+		if f := msgValue.WhichOneof(of); f != nil {
+			return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
+		}
+	}
+
+	switch {
+	case fieldDescriptor.IsList():
+		return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
+	case fieldDescriptor.IsMap():
+		return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
+	}
+
+	if len(values) > 1 {
+		return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
+	}
+
+	return populateField(fieldDescriptor, msgValue, values[0])
+}
+
+func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
+	v, err := parseField(fieldDescriptor, value)
+	if err != nil {
+		return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
+	}
+
+	msgValue.Set(fieldDescriptor, v)
+	return nil
+}
+
+func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
+	for _, value := range values {
+		v, err := parseField(fieldDescriptor, value)
+		if err != nil {
+			return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
+		}
+		list.Append(v)
+	}
+
+	return nil
+}
+
+func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
+	if len(values) != 2 {
+		return fmt.Errorf("exactly two values (key and value) must be provided for map %q, got %d", fieldDescriptor.FullName(), len(values))
+	}
+
+	key, err := parseField(fieldDescriptor.MapKey(), values[0])
+	if err != nil {
+		return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
+	}
+
+	value, err := parseField(fieldDescriptor.MapValue(), values[1])
+	if err != nil {
+		return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
+	}
+
+	mp.Set(key.MapKey(), value)
+
+	return nil
+}
+
+func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
+	switch fieldDescriptor.Kind() {
+	case protoreflect.BoolKind:
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfBool(v), nil
+	case protoreflect.EnumKind:
+		enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
+		switch {
+		case errors.Is(err, protoregistry.NotFound):
+			return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+		case err != nil:
+			return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
+		}
+		// Look for enum by name
+		v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
+		if v == nil {
+			i, err := strconv.Atoi(value)
+			if err != nil {
+				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+			}
+			// Look for enum by number
+			v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
+			if v == nil {
+				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+			}
+		}
+		return protoreflect.ValueOfEnum(v.Number()), nil
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		v, err := strconv.ParseInt(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfInt32(int32(v)), nil
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		v, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfInt64(v), nil
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		v, err := strconv.ParseUint(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfUint32(uint32(v)), nil
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		v, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfUint64(v), nil
+	case protoreflect.FloatKind:
+		v, err := strconv.ParseFloat(value, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfFloat32(float32(v)), nil
+	case protoreflect.DoubleKind:
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfFloat64(v), nil
+	case protoreflect.StringKind:
+		return protoreflect.ValueOfString(value), nil
+	case protoreflect.BytesKind:
+		v, err := base64.URLEncoding.DecodeString(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return protoreflect.ValueOfBytes(v), nil
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		return parseMessage(fieldDescriptor.Message(), value)
+	default:
+		panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
+	}
+}
+
+func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
+	var msg proto.Message
+	switch msgDescriptor.FullName() {
+	case "google.protobuf.Timestamp":
+		if value == "null" {
+			break
+		}
+		t, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = timestamppb.New(t)
+	case "google.protobuf.Duration":
+		if value == "null" {
+			break
+		}
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = durationpb.New(d)
+	case "google.protobuf.DoubleValue":
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.DoubleValue{Value: v}
+	case "google.protobuf.FloatValue":
+		v, err := strconv.ParseFloat(value, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.FloatValue{Value: float32(v)}
+	case "google.protobuf.Int64Value":
+		v, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.Int64Value{Value: v}
+	case "google.protobuf.Int32Value":
+		v, err := strconv.ParseInt(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.Int32Value{Value: int32(v)}
+	case "google.protobuf.UInt64Value":
+		v, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.UInt64Value{Value: v}
+	case "google.protobuf.UInt32Value":
+		v, err := strconv.ParseUint(value, 10, 32)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.UInt32Value{Value: uint32(v)}
+	case "google.protobuf.BoolValue":
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.BoolValue{Value: v}
+	case "google.protobuf.StringValue":
+		msg = &wrapperspb.StringValue{Value: value}
+	case "google.protobuf.BytesValue":
+		v, err := base64.URLEncoding.DecodeString(value)
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		msg = &wrapperspb.BytesValue{Value: v}
+	case "google.protobuf.FieldMask":
+		fm := &field_mask.FieldMask{}
+		fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
+		msg = fm
+	default:
+		return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
+	}
+
+	return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
new file mode 100644
index 000000000..5d8d12bc4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -0,0 +1,27 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "utilities",
+    srcs = [
+        "doc.go",
+        "pattern.go",
+        "readerfactory.go",
+        "trie.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
+)
+
+go_test(
+    name = "utilities_test",
+    size = "small",
+    srcs = ["trie_test.go"],
+    deps = [":utilities"],
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":utilities",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
new file mode 100644
index 000000000..cf79a4d58
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
new file mode 100644
index 000000000..dfe7de486
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to the stack
+	OpPush
+	// OpLitPush pushes a component to the stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes them to the stack
+	OpPushM
+	// OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 000000000..6dd385466
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory reads all of r into memory and returns a factory function;
+// each call to that function yields a new io.Reader that begins at the start
+// of the buffered stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 000000000..af3b703d5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array implementation of a trie over sequences of strings.
+type DoubleArray struct {
+	// Encoding maps each token string to its integer code.
+	Encoding map[string]int
+	// Base is the base array of the double array.
+	Base []int
+	// Check is the check array of the double array.
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		var encoded []int
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := -1
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		if lastVal == seqs[i][n.col+1] {
+			continue
+		}
+		// Track the last code seen so that runs of equal codes collapse
+		// into a single child; without this the comparison above never fires.
+		lastVal = seqs[i][n.col+1]
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
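+	// Search for the smallest base value i such that every child's slot
+	// i+code is still unclaimed (Check == 0).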
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
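+	// Claim each child's slot. Check stores the parent position plus one,
+	// leaving zero to mean "free".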
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	return k < len(sj)
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+	if len(da.Base) == 0 {
+		return false
+	}
+
+	var i int
+	for _, t := range seq {
+		code, ok := da.Encoding[t]
+		if !ok {
+			break
+		}
+		j := da.Base[i] + code
+		if len(da.Check) <= j || da.Check[j] != i+1 {
+			break
+		}
+		i = j
+	}
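+	// Every stored sequence ends with an implicit terminator token whose
+	// code is len(da.Encoding); a prefix matches only if that transition
+	// exists from the current position.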
+	j := da.Base[i] + len(da.Encoding)
+	if len(da.Check) <= j || da.Check[j] != i+1 {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/uptrace/bun/extra/bunotel/LICENSE b/vendor/github.com/uptrace/bun/extra/bunotel/LICENSE
new file mode 100644
index 000000000..7ec81810c
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/extra/bunotel/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/uptrace/bun/extra/bunotel/README.md b/vendor/github.com/uptrace/bun/extra/bunotel/README.md
new file mode 100644
index 000000000..50b3e6c48
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/extra/bunotel/README.md
@@ -0,0 +1,3 @@
+# OpenTelemetry instrumentation for Bun
+
+See [example](../example/opentelemetry) for details.
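+
+As a minimal sketch of wiring the hook into a `bun.DB` (the dialect and the
+`db.name` value here are placeholder assumptions, not requirements):
+
+```go
+package example
+
+import (
+	"database/sql"
+
+	"github.com/uptrace/bun"
+	"github.com/uptrace/bun/dialect/sqlitedialect"
+	"github.com/uptrace/bun/extra/bunotel"
+)
+
+// newInstrumentedDB wraps an existing *sql.DB in a bun.DB and registers the
+// OpenTelemetry query hook, so every query is traced and timed.
+func newInstrumentedDB(sqldb *sql.DB) *bun.DB {
+	db := bun.NewDB(sqldb, sqlitedialect.New())
+	db.AddQueryHook(bunotel.NewQueryHook(bunotel.WithDBName("mydb")))
+	return db
+}
+```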
diff --git a/vendor/github.com/uptrace/bun/extra/bunotel/option.go b/vendor/github.com/uptrace/bun/extra/bunotel/option.go
new file mode 100644
index 000000000..dc294ffa5
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/extra/bunotel/option.go
@@ -0,0 +1,32 @@
+package bunotel
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+)
+
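+// Option is a functional option that configures a QueryHook.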
+type Option func(h *QueryHook)
+
+// WithAttributes configures attributes that are used to create a span.
+func WithAttributes(attrs ...attribute.KeyValue) Option {
+	return func(h *QueryHook) {
+		h.attrs = append(h.attrs, attrs...)
+	}
+}
+
+// WithDBName configures a db.name attribute.
+func WithDBName(name string) Option {
+	return func(h *QueryHook) {
+		h.attrs = append(h.attrs, semconv.DBNameKey.String(name))
+	}
+}
+
+// WithFormattedQueries enables formatting of the query that is added
+// as the statement attribute to the trace.
+// This means that all placeholders and arguments will be filled first
+// and the query will contain all information as sent to the database.
+func WithFormattedQueries(format bool) Option {
+	return func(h *QueryHook) {
+		h.formatQueries = format
+	}
+}
diff --git a/vendor/github.com/uptrace/bun/extra/bunotel/otel.go b/vendor/github.com/uptrace/bun/extra/bunotel/otel.go
new file mode 100644
index 000000000..25000307d
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/extra/bunotel/otel.go
@@ -0,0 +1,188 @@
+package bunotel
+
+import (
+	"context"
+	"database/sql"
+	"runtime"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/metric/global"
+	"go.opentelemetry.io/otel/metric/instrument"
+	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/uptrace/bun"
+	"github.com/uptrace/bun/dialect"
+	"github.com/uptrace/bun/schema"
+	"github.com/uptrace/opentelemetry-go-extra/otelsql"
+)
+
+var (
+	tracer = otel.Tracer("github.com/uptrace/bun")
+	meter  = global.Meter("github.com/uptrace/bun")
+
+	queryHistogram, _ = meter.Int64Histogram(
+		"go.sql.query_timing",
+		instrument.WithDescription("Timing of processed queries"),
+		instrument.WithUnit("milliseconds"),
+	)
+)
+
+type QueryHook struct {
+	attrs         []attribute.KeyValue
+	formatQueries bool
+}
+
+var _ bun.QueryHook = (*QueryHook)(nil)
+
+func NewQueryHook(opts ...Option) *QueryHook {
+	h := new(QueryHook)
+	for _, opt := range opts {
+		opt(h)
+	}
+	return h
+}
+
+func (h *QueryHook) Init(db *bun.DB) {
+	labels := make([]attribute.KeyValue, 0, len(h.attrs)+1)
+	labels = append(labels, h.attrs...)
+	if sys := dbSystem(db); sys.Valid() {
+		labels = append(labels, sys)
+	}
+
+	otelsql.ReportDBStatsMetrics(db.DB, otelsql.WithAttributes(labels...))
+}
+
+func (h *QueryHook) BeforeQuery(ctx context.Context, event *bun.QueryEvent) context.Context {
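+	// Start the span with an empty name; AfterQuery renames it to the SQL
+	// operation once the query has run.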
+	ctx, _ = tracer.Start(ctx, "", trace.WithSpanKind(trace.SpanKindClient))
+	return ctx
+}
+
+func (h *QueryHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
+	operation := event.Operation()
+	dbOperation := semconv.DBOperationKey.String(operation)
+
+	labels := make([]attribute.KeyValue, 0, len(h.attrs)+2)
+	labels = append(labels, h.attrs...)
+	labels = append(labels, dbOperation)
+	if event.IQuery != nil {
+		if tableName := event.IQuery.GetTableName(); tableName != "" {
+			labels = append(labels, semconv.DBSQLTableKey.String(tableName))
+		}
+	}
+
+	queryHistogram.Record(ctx, time.Since(event.StartTime).Milliseconds(), labels...)
+
+	span := trace.SpanFromContext(ctx)
+	if !span.IsRecording() {
+		return
+	}
+
+	span.SetName(operation)
+	defer span.End()
+
+	query := h.eventQuery(event)
+	fn, file, line := funcFileLine("github.com/uptrace/bun")
+
+	attrs := make([]attribute.KeyValue, 0, 10)
+	attrs = append(attrs, h.attrs...)
+	attrs = append(attrs,
+		dbOperation,
+		semconv.DBStatementKey.String(query),
+		semconv.CodeFunctionKey.String(fn),
+		semconv.CodeFilepathKey.String(file),
+		semconv.CodeLineNumberKey.Int(line),
+	)
+
+	if sys := dbSystem(event.DB); sys.Valid() {
+		attrs = append(attrs, sys)
+	}
+	if event.Result != nil {
+		if n, _ := event.Result.RowsAffected(); n > 0 {
+			attrs = append(attrs, attribute.Int64("db.rows_affected", n))
+		}
+	}
+
+	switch event.Err {
+	case nil, sql.ErrNoRows, sql.ErrTxDone:
+		// ignore
+	default:
+		span.RecordError(event.Err)
+		span.SetStatus(codes.Error, event.Err.Error())
+	}
+
+	span.SetAttributes(attrs...)
+}
+
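+// funcFileLine walks the caller stack and returns the function, file, and
+// line of the first frame outside pkg, used for the code.* span attributes.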
+func funcFileLine(pkg string) (string, string, int) {
+	const depth = 16
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	ff := runtime.CallersFrames(pcs[:n])
+
+	var fn, file string
+	var line int
+	for {
+		f, ok := ff.Next()
+		if !ok {
+			break
+		}
+		fn, file, line = f.Function, f.File, f.Line
+		if !strings.Contains(fn, pkg) {
+			break
+		}
+	}
+
+	if ind := strings.LastIndexByte(fn, '/'); ind != -1 {
+		fn = fn[ind+1:]
+	}
+
+	return fn, file, line
+}
+
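+// eventQuery picks the query to record: the formatted query when formatting
+// is enabled and the query is reasonably small, otherwise the unformatted
+// template, truncated to a hard limit either way.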
+func (h *QueryHook) eventQuery(event *bun.QueryEvent) string {
+	const softQueryLimit = 8000
+	const hardQueryLimit = 16000
+
+	var query string
+
+	if h.formatQueries && len(event.Query) <= softQueryLimit {
+		query = event.Query
+	} else {
+		query = unformattedQuery(event)
+	}
+
+	if len(query) > hardQueryLimit {
+		query = query[:hardQueryLimit]
+	}
+
+	return query
+}
+
+func unformattedQuery(event *bun.QueryEvent) string {
+	if event.IQuery != nil {
+		if b, err := event.IQuery.AppendQuery(schema.NewNopFormatter(), nil); err == nil {
+			return bytesToString(b)
+		}
+	}
+	return string(event.QueryTemplate)
+}
+
+func dbSystem(db *bun.DB) attribute.KeyValue {
+	switch db.Dialect().Name() {
+	case dialect.PG:
+		return semconv.DBSystemPostgreSQL
+	case dialect.MySQL:
+		return semconv.DBSystemMySQL
+	case dialect.SQLite:
+		return semconv.DBSystemSqlite
+	case dialect.MSSQL:
+		return semconv.DBSystemMSSQL
+	default:
+		return attribute.KeyValue{}
+	}
+}
diff --git a/vendor/github.com/uptrace/bun/extra/bunotel/safe.go b/vendor/github.com/uptrace/bun/extra/bunotel/safe.go
new file mode 100644
index 000000000..fab151a78
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/extra/bunotel/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package bunotel
+
+func bytesToString(b []byte) string {
+	return string(b)
+}
+
+func stringToBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/uptrace/bun/extra/bunotel/unsafe.go b/vendor/github.com/uptrace/bun/extra/bunotel/unsafe.go
new file mode 100644
index 000000000..23accd40e
--- /dev/null
+++ b/vendor/github.com/uptrace/bun/extra/bunotel/unsafe.go
@@ -0,0 +1,18 @@
+// +build !appengine
+
+package bunotel
+
+import "unsafe"
+
+func bytesToString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+func stringToBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(
+		&struct {
+			string
+			Cap int
+		}{s, len(s)},
+	))
+}
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/.golangci.yml b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/.golangci.yml
new file mode 100644
index 000000000..65b3c9e6e
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/.golangci.yml
@@ -0,0 +1,5 @@
+issues:
+  exclude-rules:
+    - text: 'Drivers should implement'
+      linters:
+        - staticcheck
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/LICENSE b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/LICENSE
new file mode 100644
index 000000000..83bbb00f4
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2020 github.com/uptrace/opentelemetry-go-extra Contributors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/README.md b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/README.md
new file mode 100644
index 000000000..dbded166d
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/README.md
@@ -0,0 +1,118 @@
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/opentelemetry-go-extra/otelsql)](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql)
+
+# database/sql instrumentation for OpenTelemetry Go
+
+[database/sql OpenTelemetry instrumentation](https://uptrace.dev/opentelemetry/instrumentations/go-database-sql.html)
+records database queries (including `Tx` and `Stmt` queries) and reports `DBStats` metrics.
+
+## Installation
+
+```shell
+go get github.com/uptrace/opentelemetry-go-extra/otelsql
+```
+
+## Usage
+
+To instrument database/sql, you need to connect to a database using the API provided by otelsql:
+
+| sql                         | otelsql                         |
+| --------------------------- | ------------------------------- |
+| `sql.Open(driverName, dsn)` | `otelsql.Open(driverName, dsn)` |
+| `sql.OpenDB(connector)`     | `otelsql.OpenDB(connector)`     |
+
+```go
+import (
+	"github.com/uptrace/opentelemetry-go-extra/otelsql"
+	semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
+)
+
+db, err := otelsql.Open("sqlite", "file::memory:?cache=shared",
+	otelsql.WithAttributes(semconv.DBSystemSqlite),
+	otelsql.WithDBName("mydb"))
+if err != nil {
+	panic(err)
+}
+
+// db is *sql.DB
+```
+
+Then use the context-aware API to propagate the active span via
+[context](https://uptrace.dev/opentelemetry/go-tracing.html#context):
+
+```go
+var num int
+if err := db.QueryRowContext(ctx, "SELECT 42").Scan(&num); err != nil {
+	panic(err)
+}
+```
+
+See [example](/example/) for details.
+
+## Options
+
+Both [otelsql.Open](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#Open) and
+[otelsql.OpenDB](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#OpenDB) accept
+the same [options](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#Option):
+
+- [WithAttributes](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#WithAttributes)
+  configures attributes that are used to create a span.
+- [WithDBName](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#WithDBName)
+  configures a `db.name` attribute.
+- [WithDBSystem](https://pkg.go.dev/github.com/uptrace/opentelemetry-go-extra/otelsql#WithDBSystem)
+  configures a `db.system` attribute. When possible, you should prefer using WithAttributes and
+  [semconv](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.10.0), for example,
+  `otelsql.WithAttributes(semconv.DBSystemSqlite)`.
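+
+Options can be combined. As a sketch (the DSN, DB name, and formatter are
+placeholder assumptions; `strings` must be imported):
+
+```go
+db, err := otelsql.Open("sqlite", "file::memory:?cache=shared",
+	otelsql.WithAttributes(semconv.DBSystemSqlite),
+	otelsql.WithDBName("mydb"),
+	otelsql.WithQueryFormatter(func(query string) string {
+		// Trim surrounding whitespace before the query is recorded
+		// as the db.statement span attribute.
+		return strings.TrimSpace(query)
+	}))
+if err != nil {
+	panic(err)
+}
+```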
+
+## sqlboiler
+
+You can use otelsql to instrument [sqlboiler](https://github.com/volatiletech/sqlboiler) ORM:
+
+```go
+import (
+    "github.com/uptrace/opentelemetry-go-extra/otelsql"
+    semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
+)
+
+db, err := otelsql.Open("postgres", "dbname=fun user=abc",
+    otelsql.WithAttributes(semconv.DBSystemPostgreSQL))
+if err != nil {
+  return err
+}
+
+boil.SetDB(db)
+```
+
+## GORM 1
+
+You can use otelsql to instrument [GORM 1](https://v1.gorm.io/):
+
+```go
+import (
+    "github.com/jinzhu/gorm"
+    "github.com/uptrace/opentelemetry-go-extra/otelsql"
+    semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
+)
+
+// gormOpen is like gorm.Open, but it uses otelsql to instrument the database.
+func gormOpen(driverName, dataSourceName string, opts ...otelsql.Option) (*gorm.DB, error) {
+	db, err := otelsql.Open(driverName, dataSourceName, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return gorm.Open(driverName, db)
+}
+
+db, err := gormOpen("mysql", "user:password@/dbname",
+    otelsql.WithAttributes(semconv.DBSystemMySQL))
+if err != nil {
+    panic(err)
+}
+```
+
+To instrument GORM 2, use
+[otelgorm](https://github.com/uptrace/opentelemetry-go-extra/tree/main/otelgorm).
+
+## Alternatives
+
+- https://github.com/XSAM/otelsql - different driver registration and no metrics.
+- https://github.com/j2gg0s/otsql - like XSAM/otelsql but with Prometheus metrics.
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/driver.go b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/driver.go
new file mode 100644
index 000000000..056af3c6c
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/driver.go
@@ -0,0 +1,460 @@
+package otelsql
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Open is a wrapper over sql.Open that instruments the sql.DB to record executed queries
+// using OpenTelemetry API.
+func Open(driverName, dsn string, opts ...Option) (*sql.DB, error) {
+	db, err := sql.Open(driverName, dsn)
+	if err != nil {
+		return nil, err
+	}
+	return patchDB(db, dsn, opts...)
+}
+
+func patchDB(db *sql.DB, dsn string, opts ...Option) (*sql.DB, error) {
+	dbDriver := db.Driver()
+	d := newDriver(dbDriver, opts)
+
+	if _, ok := dbDriver.(driver.DriverContext); ok {
+		connector, err := d.OpenConnector(dsn)
+		if err != nil {
+			return nil, err
+		}
+		return sqlOpenDB(connector, d.instrum), nil
+	}
+
+	return sqlOpenDB(&dsnConnector{
+		driver: d,
+		dsn:    dsn,
+	}, d.instrum), nil
+}
+
+// OpenDB is a wrapper over sql.OpenDB that instruments the sql.DB to record executed queries
+// using OpenTelemetry API.
+func OpenDB(connector driver.Connector, opts ...Option) *sql.DB {
+	instrum := newDBInstrum(opts)
+	c := newConnector(connector.Driver(), connector, instrum)
+	return sqlOpenDB(c, instrum)
+}
+
+func sqlOpenDB(connector driver.Connector, instrum *dbInstrum) *sql.DB {
+	db := sql.OpenDB(connector)
+	ReportDBStatsMetrics(db, WithMeterProvider(instrum.meterProvider), WithAttributes(instrum.attrs...))
+	return db
+}
+
+type dsnConnector struct {
+	driver *otelDriver
+	dsn    string
+}
+
+func (c *dsnConnector) Connect(ctx context.Context) (driver.Conn, error) {
+	var conn driver.Conn
+	err := c.driver.instrum.withSpan(ctx, "db.Connect", "",
+		func(ctx context.Context, span trace.Span) error {
+			var err error
+			conn, err = c.driver.Open(c.dsn)
+			return err
+		})
+	return conn, err
+}
+
+func (c *dsnConnector) Driver() driver.Driver {
+	return c.driver
+}
+
+//------------------------------------------------------------------------------
+
+type otelDriver struct {
+	driver    driver.Driver
+	driverCtx driver.DriverContext
+	instrum   *dbInstrum
+}
+
+var _ driver.DriverContext = (*otelDriver)(nil)
+
+func newDriver(dr driver.Driver, opts []Option) *otelDriver {
+	driverCtx, _ := dr.(driver.DriverContext)
+	d := &otelDriver{
+		driver:    dr,
+		driverCtx: driverCtx,
+		instrum:   newDBInstrum(opts),
+	}
+	return d
+}
+
+func (d *otelDriver) Open(name string) (driver.Conn, error) {
+	conn, err := d.driver.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return newConn(conn, d.instrum), nil
+}
+
+func (d *otelDriver) OpenConnector(dsn string) (driver.Connector, error) {
+	connector, err := d.driverCtx.OpenConnector(dsn)
+	if err != nil {
+		return nil, err
+	}
+	return newConnector(d, connector, d.instrum), nil
+}
+
+//------------------------------------------------------------------------------
+
+type connector struct {
+	driver.Connector
+	driver  driver.Driver
+	instrum *dbInstrum
+}
+
+var _ driver.Connector = (*connector)(nil)
+
+func newConnector(d driver.Driver, c driver.Connector, instrum *dbInstrum) *connector {
+	return &connector{
+		driver:    d,
+		Connector: c,
+		instrum:   instrum,
+	}
+}
+
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
+	var conn driver.Conn
+	if err := c.instrum.withSpan(ctx, "db.Connect", "",
+		func(ctx context.Context, span trace.Span) error {
+			var err error
+			conn, err = c.Connector.Connect(ctx)
+			return err
+		}); err != nil {
+		return nil, err
+	}
+	return newConn(conn, c.instrum), nil
+}
+
+func (c *connector) Driver() driver.Driver {
+	return c.driver
+}
+
+//------------------------------------------------------------------------------
+
+type otelConn struct {
+	driver.Conn
+
+	instrum *dbInstrum
+
+	ping            pingFunc
+	exec            execFunc
+	execCtx         execCtxFunc
+	query           queryFunc
+	queryCtx        queryCtxFunc
+	prepareCtx      prepareCtxFunc
+	beginTx         beginTxFunc
+	resetSession    resetSessionFunc
+	checkNamedValue checkNamedValueFunc
+}
+
+var _ driver.Conn = (*otelConn)(nil)
+
+func newConn(conn driver.Conn, instrum *dbInstrum) *otelConn {
+	cn := &otelConn{
+		Conn:    conn,
+		instrum: instrum,
+	}
+
+	cn.ping = cn.createPingFunc(conn)
+	cn.exec = cn.createExecFunc(conn)
+	cn.execCtx = cn.createExecCtxFunc(conn)
+	cn.query = cn.createQueryFunc(conn)
+	cn.queryCtx = cn.createQueryCtxFunc(conn)
+	cn.prepareCtx = cn.createPrepareCtxFunc(conn)
+	cn.beginTx = cn.createBeginTxFunc(conn)
+	cn.resetSession = cn.createResetSessionFunc(conn)
+	cn.checkNamedValue = cn.createCheckNamedValueFunc(conn)
+
+	return cn
+}
+
+var _ driver.Pinger = (*otelConn)(nil)
+
+func (c *otelConn) Ping(ctx context.Context) error {
+	return c.ping(ctx)
+}
+
+type pingFunc func(ctx context.Context) error
+
+func (c *otelConn) createPingFunc(conn driver.Conn) pingFunc {
+	if pinger, ok := conn.(driver.Pinger); ok {
+		return func(ctx context.Context) error {
+			return c.instrum.withSpan(ctx, "db.Ping", "",
+				func(ctx context.Context, span trace.Span) error {
+					return pinger.Ping(ctx)
+				})
+		}
+	}
+	return func(ctx context.Context) error {
+		return driver.ErrSkip
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.Execer = (*otelConn)(nil)
+
+func (c *otelConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+	return c.exec(query, args)
+}
+
+type execFunc func(query string, args []driver.Value) (driver.Result, error)
+
+func (c *otelConn) createExecFunc(conn driver.Conn) execFunc {
+	if execer, ok := conn.(driver.Execer); ok {
+		return func(query string, args []driver.Value) (driver.Result, error) {
+			return execer.Exec(query, args)
+		}
+	}
+	return func(query string, args []driver.Value) (driver.Result, error) {
+		return nil, driver.ErrSkip
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.ExecerContext = (*otelConn)(nil)
+
+func (c *otelConn) ExecContext(
+	ctx context.Context, query string, args []driver.NamedValue,
+) (driver.Result, error) {
+	return c.execCtx(ctx, query, args)
+}
+
+type execCtxFunc func(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error)
+
+func (c *otelConn) createExecCtxFunc(conn driver.Conn) execCtxFunc {
+	var fn execCtxFunc
+
+	if execer, ok := conn.(driver.ExecerContext); ok {
+		fn = execer.ExecContext
+	} else {
+		fn = func(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+			vArgs, err := namedValueToValue(args)
+			if err != nil {
+				return nil, err
+			}
+			return c.exec(query, vArgs)
+		}
+	}
+
+	return func(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+		var res driver.Result
+		if err := c.instrum.withSpan(ctx, "db.Exec", query,
+			func(ctx context.Context, span trace.Span) error {
+				var err error
+				res, err = fn(ctx, query, args)
+				if err != nil {
+					return err
+				}
+
+				if span.IsRecording() {
+					rows, err := res.RowsAffected()
+					if err == nil {
+						span.SetAttributes(dbRowsAffected.Int64(rows))
+					}
+				}
+
+				return nil
+			}); err != nil {
+			return nil, err
+		}
+		return res, nil
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.Queryer = (*otelConn)(nil)
+
+func (c *otelConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+	return c.query(query, args)
+}
+
+type queryFunc func(query string, args []driver.Value) (driver.Rows, error)
+
+func (c *otelConn) createQueryFunc(conn driver.Conn) queryFunc {
+	if queryer, ok := c.Conn.(driver.Queryer); ok {
+		return func(query string, args []driver.Value) (driver.Rows, error) {
+			return queryer.Query(query, args)
+		}
+	}
+	return func(query string, args []driver.Value) (driver.Rows, error) {
+		return nil, driver.ErrSkip
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.QueryerContext = (*otelConn)(nil)
+
+func (c *otelConn) QueryContext(
+	ctx context.Context, query string, args []driver.NamedValue,
+) (driver.Rows, error) {
+	return c.queryCtx(ctx, query, args)
+}
+
+type queryCtxFunc func(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error)
+
+func (c *otelConn) createQueryCtxFunc(conn driver.Conn) queryCtxFunc {
+	var fn queryCtxFunc
+
+	if queryer, ok := c.Conn.(driver.QueryerContext); ok {
+		fn = queryer.QueryContext
+	} else {
+		fn = func(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+			vArgs, err := namedValueToValue(args)
+			if err != nil {
+				return nil, err
+			}
+			return c.query(query, vArgs)
+		}
+	}
+
+	return func(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+		var rows driver.Rows
+		err := c.instrum.withSpan(ctx, "db.Query", query,
+			func(ctx context.Context, span trace.Span) error {
+				var err error
+				rows, err = fn(ctx, query, args)
+				return err
+			})
+		return rows, err
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.ConnPrepareContext = (*otelConn)(nil)
+
+func (c *otelConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+	return c.prepareCtx(ctx, query)
+}
+
+type prepareCtxFunc func(ctx context.Context, query string) (driver.Stmt, error)
+
+func (c *otelConn) createPrepareCtxFunc(conn driver.Conn) prepareCtxFunc {
+	var fn prepareCtxFunc
+
+	if preparer, ok := c.Conn.(driver.ConnPrepareContext); ok {
+		fn = preparer.PrepareContext
+	} else {
+		fn = func(ctx context.Context, query string) (driver.Stmt, error) {
+			return c.Conn.Prepare(query)
+		}
+	}
+
+	return func(ctx context.Context, query string) (driver.Stmt, error) {
+		var stmt driver.Stmt
+		if err := c.instrum.withSpan(ctx, "db.Prepare", query,
+			func(ctx context.Context, span trace.Span) error {
+				var err error
+				stmt, err = fn(ctx, query)
+				return err
+			}); err != nil {
+			return nil, err
+		}
+		return newStmt(stmt, query, c.instrum), nil
+	}
+}
+
+var _ driver.ConnBeginTx = (*otelConn)(nil)
+
+func (c *otelConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+	return c.beginTx(ctx, opts)
+}
+
+type beginTxFunc func(ctx context.Context, opts driver.TxOptions) (driver.Tx, error)
+
+func (c *otelConn) createBeginTxFunc(conn driver.Conn) beginTxFunc {
+	var fn beginTxFunc
+
+	if txor, ok := conn.(driver.ConnBeginTx); ok {
+		fn = txor.BeginTx
+	} else {
+		fn = func(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+			return conn.Begin()
+		}
+	}
+
+	return func(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+		var tx driver.Tx
+		if err := c.instrum.withSpan(ctx, "db.Begin", "",
+			func(ctx context.Context, span trace.Span) error {
+				var err error
+				tx, err = fn(ctx, opts)
+				return err
+			}); err != nil {
+			return nil, err
+		}
+		return newTx(ctx, tx, c.instrum), nil
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.SessionResetter = (*otelConn)(nil)
+
+func (c *otelConn) ResetSession(ctx context.Context) error {
+	return c.resetSession(ctx)
+}
+
+type resetSessionFunc func(ctx context.Context) error
+
+func (c *otelConn) createResetSessionFunc(conn driver.Conn) resetSessionFunc {
+	if resetter, ok := c.Conn.(driver.SessionResetter); ok {
+		return func(ctx context.Context) error {
+			return resetter.ResetSession(ctx)
+		}
+	}
+	return func(ctx context.Context) error {
+		return driver.ErrSkip
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.NamedValueChecker = (*otelConn)(nil)
+
+func (c *otelConn) CheckNamedValue(value *driver.NamedValue) error {
+	return c.checkNamedValue(value)
+}
+
+type checkNamedValueFunc func(*driver.NamedValue) error
+
+func (c *otelConn) createCheckNamedValueFunc(conn driver.Conn) checkNamedValueFunc {
+	if checker, ok := c.Conn.(driver.NamedValueChecker); ok {
+		return func(value *driver.NamedValue) error {
+			return checker.CheckNamedValue(value)
+		}
+	}
+	return func(value *driver.NamedValue) error {
+		return driver.ErrSkip
+	}
+}
+
+//------------------------------------------------------------------------------
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+	args := make([]driver.Value, len(named))
+	for n, param := range named {
+		if len(param.Name) > 0 {
+			return nil, errors.New("otelsql: driver does not support named parameters")
+		}
+		args[n] = param.Value
+	}
+	return args, nil
+}
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/otel.go b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/otel.go
new file mode 100644
index 000000000..0932e2759
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/otel.go
@@ -0,0 +1,254 @@
+package otelsql
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"io"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/global"
+	"go.opentelemetry.io/otel/metric/instrument"
+	semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const instrumName = "github.com/uptrace/opentelemetry-go-extra/otelsql"
+
+var dbRowsAffected = attribute.Key("db.rows_affected")
+
+type config struct {
+	tracerProvider trace.TracerProvider
+	tracer         trace.Tracer //nolint:structcheck
+
+	meterProvider metric.MeterProvider
+	meter         metric.Meter
+
+	attrs []attribute.KeyValue
+
+	queryFormatter func(query string) string
+}
+
+func newConfig(opts []Option) *config {
+	c := &config{
+		tracerProvider: otel.GetTracerProvider(),
+		meterProvider:  global.MeterProvider(),
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	return c
+}
+
+func (c *config) formatQuery(query string) string {
+	if c.queryFormatter != nil {
+		return c.queryFormatter(query)
+	}
+	return query
+}
+
+type dbInstrum struct {
+	*config
+
+	queryHistogram instrument.Int64Histogram
+}
+
+func newDBInstrum(opts []Option) *dbInstrum {
+	t := &dbInstrum{
+		config: newConfig(opts),
+	}
+
+	if t.tracer == nil {
+		t.tracer = t.tracerProvider.Tracer(instrumName)
+	}
+	if t.meter == nil {
+		t.meter = t.meterProvider.Meter(instrumName)
+	}
+
+	var err error
+	t.queryHistogram, err = t.meter.Int64Histogram(
+		"go.sql.query_timing",
+		instrument.WithDescription("Timing of processed queries"),
+		instrument.WithUnit("milliseconds"),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	return t
+}
+
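+// withSpan runs fn inside a client span named spanName. A non-empty query is
+// recorded as the db.statement attribute and its duration is added to the
+// query histogram. Sentinel errors (driver.ErrSkip, io.EOF, sql.ErrNoRows)
+// do not mark the span as failed.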
+func (t *dbInstrum) withSpan(
+	ctx context.Context,
+	spanName string,
+	query string,
+	fn func(ctx context.Context, span trace.Span) error,
+) error {
+	var startTime time.Time
+	if query != "" {
+		startTime = time.Now()
+	}
+
+	attrs := make([]attribute.KeyValue, 0, len(t.attrs)+1)
+	attrs = append(attrs, t.attrs...)
+	if query != "" {
+		attrs = append(attrs, semconv.DBStatementKey.String(t.formatQuery(query)))
+	}
+
+	ctx, span := t.tracer.Start(ctx, spanName,
+		trace.WithSpanKind(trace.SpanKindClient),
+		trace.WithAttributes(attrs...))
+	err := fn(ctx, span)
+	span.End()
+
+	if query != "" {
+		t.queryHistogram.Record(ctx, time.Since(startTime).Milliseconds(), t.attrs...)
+	}
+
+	if !span.IsRecording() {
+		return err
+	}
+
+	switch err {
+	case nil,
+		driver.ErrSkip,
+		io.EOF, // end of rows iterator
+		sql.ErrNoRows:
+		// ignore
+	default:
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+	}
+
+	return err
+}
+
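+// Option is a functional option that configures the otelsql instrumentation.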
+type Option func(c *config)
+
+// WithTracerProvider configures a tracer provider that is used to create a tracer.
+func WithTracerProvider(tracerProvider trace.TracerProvider) Option {
+	return func(c *config) {
+		c.tracerProvider = tracerProvider
+	}
+}
+
+// WithAttributes configures attributes that are used to create a span.
+func WithAttributes(attrs ...attribute.KeyValue) Option {
+	return func(c *config) {
+		c.attrs = append(c.attrs, attrs...)
+	}
+}
+
+// WithDBSystem configures a db.system attribute. You should prefer using
+// WithAttributes and semconv, for example, `otelsql.WithAttributes(semconv.DBSystemSqlite)`.
+func WithDBSystem(system string) Option {
+	return func(c *config) {
+		c.attrs = append(c.attrs, semconv.DBSystemKey.String(system))
+	}
+}
+
+// WithDBName configures a db.name attribute.
+func WithDBName(name string) Option {
+	return func(c *config) {
+		c.attrs = append(c.attrs, semconv.DBNameKey.String(name))
+	}
+}
+
+// WithMeterProvider configures a metric.MeterProvider that is used to create a meter.
+func WithMeterProvider(meterProvider metric.MeterProvider) Option {
+	return func(c *config) {
+		c.meterProvider = meterProvider
+	}
+}
+
+// WithQueryFormatter configures a query formatter that is applied to a query before it is recorded.
+func WithQueryFormatter(queryFormatter func(query string) string) Option {
+	return func(c *config) {
+		c.queryFormatter = queryFormatter
+	}
+}
+
+// ReportDBStatsMetrics reports DBStats metrics using OpenTelemetry Metrics API.
+func ReportDBStatsMetrics(db *sql.DB, opts ...Option) {
+	cfg := newConfig(opts)
+
+	if cfg.meter == nil {
+		cfg.meter = cfg.meterProvider.Meter(instrumName)
+	}
+
+	meter := cfg.meter
+	labels := cfg.attrs
+
+	maxOpenConns, _ := meter.Int64ObservableGauge(
+		"go.sql.connections_max_open",
+		instrument.WithDescription("Maximum number of open connections to the database"),
+	)
+	openConns, _ := meter.Int64ObservableGauge(
+		"go.sql.connections_open",
+		instrument.WithDescription("The number of established connections both in use and idle"),
+	)
+	inUseConns, _ := meter.Int64ObservableGauge(
+		"go.sql.connections_in_use",
+		instrument.WithDescription("The number of connections currently in use"),
+	)
+	idleConns, _ := meter.Int64ObservableGauge(
+		"go.sql.connections_idle",
+		instrument.WithDescription("The number of idle connections"),
+	)
+	connsWaitCount, _ := meter.Int64ObservableCounter(
+		"go.sql.connections_wait_count",
+		instrument.WithDescription("The total number of connections waited for"),
+	)
+	connsWaitDuration, _ := meter.Int64ObservableCounter(
+		"go.sql.connections_wait_duration",
+		instrument.WithDescription("The total time blocked waiting for a new connection"),
+		instrument.WithUnit("nanoseconds"),
+	)
+	connsClosedMaxIdle, _ := meter.Int64ObservableCounter(
+		"go.sql.connections_closed_max_idle",
+		instrument.WithDescription("The total number of connections closed due to SetMaxIdleConns"),
+	)
+	connsClosedMaxIdleTime, _ := meter.Int64ObservableCounter(
+		"go.sql.connections_closed_max_idle_time",
+		instrument.WithDescription("The total number of connections closed due to SetConnMaxIdleTime"),
+	)
+	connsClosedMaxLifetime, _ := meter.Int64ObservableCounter(
+		"go.sql.connections_closed_max_lifetime",
+		instrument.WithDescription("The total number of connections closed due to SetConnMaxLifetime"),
+	)
+
+	if _, err := meter.RegisterCallback(
+		func(ctx context.Context, o metric.Observer) error {
+			stats := db.Stats()
+
+			o.ObserveInt64(maxOpenConns, int64(stats.MaxOpenConnections), labels...)
+
+			o.ObserveInt64(openConns, int64(stats.OpenConnections), labels...)
+			o.ObserveInt64(inUseConns, int64(stats.InUse), labels...)
+			o.ObserveInt64(idleConns, int64(stats.Idle), labels...)
+
+			o.ObserveInt64(connsWaitCount, stats.WaitCount, labels...)
+			o.ObserveInt64(connsWaitDuration, int64(stats.WaitDuration), labels...)
+			o.ObserveInt64(connsClosedMaxIdle, stats.MaxIdleClosed, labels...)
+			o.ObserveInt64(connsClosedMaxIdleTime, stats.MaxIdleTimeClosed, labels...)
+			o.ObserveInt64(connsClosedMaxLifetime, stats.MaxLifetimeClosed, labels...)
+
+			return nil
+		},
+		maxOpenConns,
+		openConns,
+		inUseConns,
+		idleConns,
+		connsWaitCount,
+		connsWaitDuration,
+		connsClosedMaxIdle,
+		connsClosedMaxIdleTime,
+		connsClosedMaxLifetime,
+	); err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/stmt.go b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/stmt.go
new file mode 100644
index 000000000..e87a1e73f
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/stmt.go
@@ -0,0 +1,120 @@
+package otelsql
+
+import (
+	"context"
+	"database/sql/driver"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+type otelStmt struct {
+	driver.Stmt
+
+	query   string
+	instrum *dbInstrum
+
+	execCtx  stmtExecCtxFunc
+	queryCtx stmtQueryCtxFunc
+}
+
+var _ driver.Stmt = (*otelStmt)(nil)
+
+func newStmt(stmt driver.Stmt, query string, instrum *dbInstrum) *otelStmt {
+	s := &otelStmt{
+		Stmt:    stmt,
+		query:   query,
+		instrum: instrum,
+	}
+	s.execCtx = s.createExecCtxFunc(stmt)
+	s.queryCtx = s.createQueryCtxFunc(stmt)
+	return s
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.StmtExecContext = (*otelStmt)(nil)
+
+func (stmt *otelStmt) ExecContext(
+	ctx context.Context, args []driver.NamedValue,
+) (driver.Result, error) {
+	return stmt.execCtx(ctx, args)
+}
+
+type stmtExecCtxFunc func(ctx context.Context, args []driver.NamedValue) (driver.Result, error)
+
+func (s *otelStmt) createExecCtxFunc(stmt driver.Stmt) stmtExecCtxFunc {
+	var fn stmtExecCtxFunc
+
+	if execer, ok := s.Stmt.(driver.StmtExecContext); ok {
+		fn = execer.ExecContext
+	} else {
+		fn = func(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+			vArgs, err := namedValueToValue(args)
+			if err != nil {
+				return nil, err
+			}
+			return stmt.Exec(vArgs)
+		}
+	}
+
+	return func(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+		var res driver.Result
+		err := s.instrum.withSpan(ctx, "stmt.Exec", s.query,
+			func(ctx context.Context, span trace.Span) error {
+				var err error
+				res, err = fn(ctx, args)
+				if err != nil {
+					return err
+				}
+
+				if span.IsRecording() {
+					rows, err := res.RowsAffected()
+					if err == nil {
+						span.SetAttributes(dbRowsAffected.Int64(rows))
+					}
+				}
+
+				return nil
+			})
+		return res, err
+	}
+}
+
+//------------------------------------------------------------------------------
+
+var _ driver.StmtQueryContext = (*otelStmt)(nil)
+
+func (stmt *otelStmt) QueryContext(
+	ctx context.Context, args []driver.NamedValue,
+) (driver.Rows, error) {
+	return stmt.queryCtx(ctx, args)
+}
+
+type stmtQueryCtxFunc func(ctx context.Context, args []driver.NamedValue) (driver.Rows, error)
+
+func (s *otelStmt) createQueryCtxFunc(stmt driver.Stmt) stmtQueryCtxFunc {
+	var fn stmtQueryCtxFunc
+
+	if queryer, ok := s.Stmt.(driver.StmtQueryContext); ok {
+		fn = queryer.QueryContext
+	} else {
+		fn = func(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+			vArgs, err := namedValueToValue(args)
+			if err != nil {
+				return nil, err
+			}
+			return s.Query(vArgs)
+		}
+	}
+
+	return func(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+		var rows driver.Rows
+		err := s.instrum.withSpan(ctx, "stmt.Query", s.query,
+			func(ctx context.Context, span trace.Span) error {
+				var err error
+				rows, err = fn(ctx, args)
+				return err
+			})
+		return rows, err
+	}
+}
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/tx.go b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/tx.go
new file mode 100644
index 000000000..c4bd55e13
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/tx.go
@@ -0,0 +1,38 @@
+package otelsql
+
+import (
+	"context"
+	"database/sql/driver"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+type otelTx struct {
+	ctx     context.Context
+	tx      driver.Tx
+	instrum *dbInstrum
+}
+
+var _ driver.Tx = (*otelTx)(nil)
+
+func newTx(ctx context.Context, tx driver.Tx, instrum *dbInstrum) *otelTx {
+	return &otelTx{
+		ctx:     ctx,
+		tx:      tx,
+		instrum: instrum,
+	}
+}
+
+func (tx *otelTx) Commit() error {
+	return tx.instrum.withSpan(tx.ctx, "tx.Commit", "",
+		func(ctx context.Context, span trace.Span) error {
+			return tx.tx.Commit()
+		})
+}
+
+func (tx *otelTx) Rollback() error {
+	return tx.instrum.withSpan(tx.ctx, "tx.Rollback", "",
+		func(ctx context.Context, span trace.Span) error {
+			return tx.tx.Rollback()
+		})
+}
diff --git a/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/version.go b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/version.go
new file mode 100644
index 000000000..97134301d
--- /dev/null
+++ b/vendor/github.com/uptrace/opentelemetry-go-extra/otelsql/version.go
@@ -0,0 +1,6 @@
+package otelsql
+
+// Version returns the current release version.
+func Version() string {
+	return "0.1.21"
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/doc.go
new file mode 100644
index 000000000..3b1a541bc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otelgin instruments the github.com/gin-gonic/gin package.
+//
+// Currently there are two ways the code can be instrumented. One is
+// instrumenting the routing of a received message (the Middleware function)
+// and instrumenting the response generation through template evaluation (the
+// HTML function).
+package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
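
A short usage sketch of the two instrumentation points described in the package comment above; the service name, template directory, and port are assumptions for illustration.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)

func main() {
	r := gin.Default()
	r.LoadHTMLGlob("templates/*") // assumed template directory
	// 1. Route instrumentation: one server span per request.
	r.Use(otelgin.Middleware("my-server"))
	r.GET("/", func(c *gin.Context) {
		// 2. Template instrumentation: child span around rendering.
		otelgin.HTML(c, http.StatusOK, "index.tmpl", gin.H{"title": "hello"})
	})
	_ = r.Run(":8080")
}
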
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go
new file mode 100644
index 000000000..6e445967b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/gintrace.go
@@ -0,0 +1,142 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on https://github.com/DataDog/dd-trace-go/blob/8fb554ff7cf694267f9077ae35e27ce4689ed8b6/contrib/gin-gonic/gin/gintrace.go
+
+package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
+
+import (
+	"fmt"
+
+	"github.com/gin-gonic/gin"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/codes"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/propagation"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+	oteltrace "go.opentelemetry.io/otel/trace"
+)
+
+const (
+	tracerKey  = "otel-go-contrib-tracer"
+	tracerName = "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
+)
+
+// Middleware returns middleware that will trace incoming requests.
+// The service parameter should describe the name of the (virtual)
+// server handling the request.
+func Middleware(service string, opts ...Option) gin.HandlerFunc {
+	cfg := config{}
+	for _, opt := range opts {
+		opt.apply(&cfg)
+	}
+	if cfg.TracerProvider == nil {
+		cfg.TracerProvider = otel.GetTracerProvider()
+	}
+	tracer := cfg.TracerProvider.Tracer(
+		tracerName,
+		oteltrace.WithInstrumentationVersion(SemVersion()),
+	)
+	if cfg.Propagators == nil {
+		cfg.Propagators = otel.GetTextMapPropagator()
+	}
+	return func(c *gin.Context) {
+		for _, f := range cfg.Filters {
+			if !f(c.Request) {
+				// Serve the request to the next middleware
+				// if a filter rejects the request.
+				c.Next()
+				return
+			}
+		}
+		c.Set(tracerKey, tracer)
+		savedCtx := c.Request.Context()
+		defer func() {
+			c.Request = c.Request.WithContext(savedCtx)
+		}()
+		ctx := cfg.Propagators.Extract(savedCtx, propagation.HeaderCarrier(c.Request.Header))
+		opts := []oteltrace.SpanStartOption{
+			oteltrace.WithAttributes(httpconv.ServerRequest(service, c.Request)...),
+			oteltrace.WithSpanKind(oteltrace.SpanKindServer),
+		}
+		var spanName string
+		if cfg.SpanNameFormatter == nil {
+			spanName = c.FullPath()
+		} else {
+			spanName = cfg.SpanNameFormatter(c.Request)
+		}
+		if spanName == "" {
+			spanName = fmt.Sprintf("HTTP %s route not found", c.Request.Method)
+		} else {
+			rAttr := semconv.HTTPRoute(spanName)
+			opts = append(opts, oteltrace.WithAttributes(rAttr))
+		}
+		ctx, span := tracer.Start(ctx, spanName, opts...)
+		defer span.End()
+
+		// pass the span through the request context
+		c.Request = c.Request.WithContext(ctx)
+
+		// serve the request to the next middleware
+		c.Next()
+
+		status := c.Writer.Status()
+		span.SetStatus(httpconv.ServerStatus(status))
+		if status > 0 {
+			span.SetAttributes(semconv.HTTPStatusCode(status))
+		}
+		if len(c.Errors) > 0 {
+			span.SetAttributes(attribute.String("gin.errors", c.Errors.String()))
+		}
+	}
+}
+
+// HTML will trace the rendering of the template as a child of the
+// span in the given context. This is a replacement for the
+// gin.Context.HTML function - it invokes the original function after
+// setting up the span.
+func HTML(c *gin.Context, code int, name string, obj interface{}) {
+	var tracer oteltrace.Tracer
+	tracerInterface, ok := c.Get(tracerKey)
+	if ok {
+		tracer, ok = tracerInterface.(oteltrace.Tracer)
+	}
+	if !ok {
+		tracer = otel.GetTracerProvider().Tracer(
+			tracerName,
+			oteltrace.WithInstrumentationVersion(SemVersion()),
+		)
+	}
+	savedContext := c.Request.Context()
+	defer func() {
+		c.Request = c.Request.WithContext(savedContext)
+	}()
+	opt := oteltrace.WithAttributes(attribute.String("go.template", name))
+	_, span := tracer.Start(savedContext, "gin.renderer.html", opt)
+	defer func() {
+		if r := recover(); r != nil {
+			err := fmt.Errorf("error rendering template:%s: %s", name, r)
+			span.RecordError(err)
+			span.SetStatus(codes.Error, "template failure")
+			span.End()
+			panic(r)
+		} else {
+			span.End()
+		}
+	}()
+	c.HTML(code, name, obj)
+}
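
Because Middleware stores the span context on c.Request (see the WithContext call above), downstream handlers can recover the active server span with the standard otel API. A minimal sketch; the route and attribute here are invented for illustration.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	r := gin.New()
	r.Use(otelgin.Middleware("my-server"))
	r.GET("/users/:id", func(c *gin.Context) {
		// SpanFromContext returns the server span Middleware started.
		span := trace.SpanFromContext(c.Request.Context())
		span.SetAttributes(attribute.String("user.id", c.Param("id")))
		c.Status(http.StatusNoContent)
	})
	_ = r.Run(":8080")
}
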
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/option.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/option.go
new file mode 100644
index 000000000..c13c3dad8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/option.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on https://github.com/DataDog/dd-trace-go/blob/8fb554ff7cf694267f9077ae35e27ce4689ed8b6/contrib/gin-gonic/gin/option.go
+
+package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+	oteltrace "go.opentelemetry.io/otel/trace"
+)
+
+type config struct {
+	TracerProvider    oteltrace.TracerProvider
+	Propagators       propagation.TextMapPropagator
+	Filters           []Filter
+	SpanNameFormatter SpanNameFormatter
+}
+
+// Filter is a predicate used to determine whether a given http.Request should
+// be traced. A Filter must return true if the request should be traced.
+type Filter func(*http.Request) bool
+
+// SpanNameFormatter is used to set the span name from an http.Request.
+type SpanNameFormatter func(r *http.Request) string
+
+// Option specifies instrumentation configuration options.
+type Option interface {
+	apply(*config)
+}
+
+type optionFunc func(*config)
+
+func (o optionFunc) apply(c *config) {
+	o(c)
+}
+
+// WithPropagators specifies propagators to use for extracting
+// information from the HTTP requests. If none are specified, global
+// ones will be used.
+func WithPropagators(propagators propagation.TextMapPropagator) Option {
+	return optionFunc(func(cfg *config) {
+		if propagators != nil {
+			cfg.Propagators = propagators
+		}
+	})
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider oteltrace.TracerProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.TracerProvider = provider
+		}
+	})
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f ...Filter) Option {
+	return optionFunc(func(c *config) {
+		c.Filters = append(c.Filters, f...)
+	})
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
+func WithSpanNameFormatter(f func(r *http.Request) string) Option {
+	return optionFunc(func(c *config) {
+		c.SpanNameFormatter = f
+	})
+}
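
A hedged sketch combining these options; the health-check path and the span-naming scheme are assumptions for illustration, not recommendations from this package. Filters drop noisy endpoints before any span is started, and the formatter overrides the default c.FullPath() naming.

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)

func main() {
	router := gin.New()
	router.Use(otelgin.Middleware("my-server",
		// Skip tracing for a hypothetical health-check endpoint.
		otelgin.WithFilter(func(r *http.Request) bool {
			return r.URL.Path != "/healthz"
		}),
		// Name spans "METHOD path" instead of the bare route template.
		otelgin.WithSpanNameFormatter(func(r *http.Request) string {
			return r.Method + " " + r.URL.Path
		}),
	))
	_ = router.Run(":8080")
}
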
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/version.go
new file mode 100644
index 000000000..b31bb092f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin/version.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelgin // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
+
+// Version is the current release version of the gin instrumentation.
+func Version() string {
+	return "0.40.0"
+	// This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+func SemVersion() string {
+	return "semver:" + Version()
+}
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 000000000..314766e91
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 000000000..0b605b3d6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,21 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+
+gen/
+
+/example/fib/fib
+/example/fib/traces.txt
+/example/jaeger/jaeger
+/example/namedtracer/namedtracer
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/zipkin/zipkin
+/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 000000000..38a1f5698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+	path = exporters/otlp/internal/opentelemetry-proto
+	url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 000000000..0f099f575
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,244 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+  issues-exit-code: 1 #Default
+  tests: true #Default
+
+linters:
+  # Disable everything by default so upgrades do not include new "default
+  # enabled" linters.
+  disable-all: true
+  # Specifically enable linters we want to use.
+  enable:
+    - depguard
+    - errcheck
+    - godot
+    - gofmt
+    - goimports
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - typecheck
+    - unused
+
+issues:
+  # Maximum issues count per one linter.
+  # Set to 0 to disable.
+  # Default: 50
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-issues-per-linter: 0
+  # Maximum count of issues with the same text.
+  # Set to 0 to disable.
+  # Default: 3
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-same-issues: 0
+  # Excluding configuration per-path, per-linter, per-text and per-source.
+  exclude-rules:
+    # TODO: Having appropriate comments for exported objects helps development,
+    # even for objects in internal packages. Appropriate comments for all
+    # exported objects should be added and this exclusion removed.
+    - path: '.*internal/.*'
+      text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test.
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+    # Example test functions should be treated like main.
+    - path: example.*_test\.go
+      text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+      linters:
+        - revive
+  include:
+    # revive exported should have comment or be unexported.
+    - EXC0012
+    # revive package comment should be of the form ...
+    - EXC0013
+
+linters-settings:
+  depguard:
+    # Check the list against standard lib.
+    # Default: false
+    include-go-root: true
+    # A list of packages for the list type specified.
+    # Default: []
+    packages:
+      - "crypto/md5"
+      - "crypto/sha1"
+      - "crypto/**/pkix"
+    ignore-file-rules:
+      - "**/*_test.go"
+    additional-guards:
+      # Do not allow testing packages in non-test files.
+      - list-type: denylist
+        include-go-root: true
+        packages:
+          - testing
+          - github.com/stretchr/testify
+        ignore-file-rules:
+          - "**/*_test.go"
+          - "**/*test/*.go"
+          - "**/internal/matchers/*.go"
+  godot:
+    exclude:
+      # Exclude sentence fragments for lists.
+      - '^[ ]*[-•]'
+      # Exclude sentences prefixing a list.
+      - ':$'
+  goimports:
+    local-prefixes: go.opentelemetry.io
+  misspell:
+    locale: US
+    ignore-words:
+      - cancelled
+  revive:
+    # Sets the default failure confidence.
+    # This means that linting errors with less than 0.8 confidence will be ignored.
+    # Default: 0.8
+    confidence: 0.01
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
+      - name: blank-imports
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
+      - name: bool-literal-in-expr
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
+      - name: constant-logical-expr
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
+      # TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
+      - name: context-as-argument
+        disabled: true
+        arguments:
+          allowTypesBefore: "*testing.T"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
+      - name: context-keys-type
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
+      - name: deep-exit
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
+      - name: defer
+        disabled: false
+        arguments:
+          - ["call-chain", "loop"]
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
+      - name: dot-imports
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
+      - name: duplicated-imports
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
+      - name: early-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
+      - name: empty-block
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
+      - name: empty-lines
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
+      - name: error-naming
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
+      - name: error-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
+      - name: error-strings
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
+      - name: errorf
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
+      - name: exported
+        disabled: false
+        arguments:
+          - "sayRepetitiveInsteadOfStutters"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
+      - name: flag-parameter
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
+      - name: identical-branches
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
+      - name: if-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
+      - name: increment-decrement
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
+      - name: indent-error-flow
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
+      - name: import-shadowing
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
+      - name: package-comments
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
+      - name: range
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
+      - name: range-val-in-closure
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
+      - name: range-val-address
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
+      - name: redefines-builtin-id
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
+      - name: string-format
+        disabled: false
+        arguments:
+          - - panic
+            - '/^[^\n]*$/'
+            - must not contain line breaks
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
+      - name: struct-tag
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
+      - name: superfluous-else
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
+      - name: time-equal
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
+      - name: var-naming
+        disabled: false
+        arguments:
+          - ["ID"] # AllowList
+          - ["Otel", "Aws", "Gcp"] # DenyList
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
+      - name: var-declaration
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
+      - name: unconditional-recursion
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
+      - name: unexported-return
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
+      - name: unhandled-error
+        disabled: false
+        arguments:
+          - "fmt.Fprint"
+          - "fmt.Fprintf"
+          - "fmt.Fprintln"
+          - "fmt.Print"
+          - "fmt.Printf"
+          - "fmt.Println"
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
+      - name: unnecessary-stmt
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
+      - name: useless-break
+        disabled: false
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
+      - name: waitgroup-by-value
+        disabled: false
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
new file mode 100644
index 000000000..40d62fa2e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -0,0 +1,6 @@
+http://localhost
+http://jaeger-collector
+https://github.com/open-telemetry/opentelemetry-go/milestone/
+https://github.com/open-telemetry/opentelemetry-go/projects
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
new file mode 100644
index 000000000..3202496c3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
@@ -0,0 +1,29 @@
+# Default state for all rules
+default: true
+
+# ul-style
+MD004: false
+
+# hard-tabs
+MD010: false
+
+# line-length
+MD013: false
+
+# no-duplicate-header
+MD024:
+  siblings_only: true
+
+#single-title
+MD025: false
+
+# ol-prefix
+MD029:
+  style: ordered
+
+# no-inline-html
+MD033: false
+
+# fenced-code-language
+MD040: false
+
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
new file mode 100644
index 000000000..1d9726f60
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -0,0 +1,2369 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span's sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
+
+### Changed
+
+- Fall back to `TextMapCarrier` when the carrier is not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead. (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages; a short usage sketch follows this section. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
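+A hedged sketch of the new creation style for these instruments, using this release's global meter accessor; the instrument names are invented for the example:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/global"
+)
+
+func main() {
+	ctx := context.Background()
+	meter := global.Meter("example")
+
+	// Replaces syncint64.Counter and syncfloat64.Histogram respectively.
+	reqs, _ := meter.Int64Counter("http.requests")
+	lat, _ := meter.Float64Histogram("http.latency.ms")
+
+	reqs.Add(ctx, 1)
+	lat.Record(ctx, 12.3)
+}
+```
+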
+### Changed
+
+- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+   This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`; a brief sketch follows this section. (#3459)
+- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` function allows instrument authors to test that returned datapoints have appropriate attributes. (#3487)
+
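+A minimal sketch of the `View` additions above; the instrument and stream names are assumptions for illustration:
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Rename the output stream of an instrument named "latency".
+	view, err := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "latency"},
+		sdkmetric.Stream{Name: "request.latency"},
+	)
+	if err != nil {
+		panic(err)
+	}
+	// Views registered on the MeterProvider apply to every Reader.
+	_ = sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
+}
+```
+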
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+   Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+   The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Reenabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+   By default, it will register with the default Prometheus registerer.
+   A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+   It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+   This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+   Instead the exporter is defined as an "unchecked" collector for Prometheus.
+   This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+   This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` package when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer.  (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+   Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the sdk. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured (and the sketch after this list). (#3175)
+- Update the minimum supported Go version to go1.18; this removes support for go1.17. (#3179)
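+
+A minimal, hedged sketch of initializing the refactored SDK, assuming `exp` is any `metric.Exporter` implementation (see the package documentation for the authoritative form):
+
+```go
+import "go.opentelemetry.io/otel/sdk/metric"
+
+// newProvider wires an exporter into the revised SDK: a PeriodicReader
+// collects and exports on an interval, and the MeterProvider owns it.
+func newProvider(exp metric.Exporter) *metric.MeterProvider {
+	reader := metric.NewPeriodicReader(exp)
+	return metric.NewMeterProvider(metric.WithReader(reader))
+}
+```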
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.18.0` to `v0.19.0`. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`).  (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from the `nonrecording` package to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
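+
+A small sketch of the equivalence described above (the instrumentation name is illustrative):
+
+```go
+import "go.opentelemetry.io/otel/metric/global"
+
+func meters() {
+	// Both calls yield a Meter from the global MeterProvider.
+	m1 := global.Meter("example.io/instrumentation")
+	m2 := global.MeterProvider().Meter("example.io/instrumentation")
+	_, _ = m1, m2
+}
+```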
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
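+
+As a hedged sketch of the revised API (assuming the instrument-provider shape of this release; the meter and instrument names are illustrative):
+
+```go
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/global"
+)
+
+// recordRequest obtains a synchronous int64 counter through the
+// value-typed instrument provider, then records a measurement.
+func recordRequest(ctx context.Context) error {
+	meter := global.MeterProvider().Meter("example.io/instrumentation")
+	counter, err := meter.SyncInt64().Counter("requests")
+	if err != nil {
+		return err
+	}
+	counter.Add(ctx, 1)
+	return nil
+}
+```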
+
+### Fixed
+
+- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the exporters' configuration in the `TracerProvider`'s message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
+  Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
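+
+A hedged sketch of the new option (the limit values are illustrative): zero disables the limited resource and a negative value means unlimited, as described above.
+
+```go
+import sdktrace "go.opentelemetry.io/otel/sdk/trace"
+
+// newTracerProvider applies raw span limits without the defaulting
+// behavior of the deprecated WithSpanLimits option.
+func newTracerProvider() *sdktrace.TracerProvider {
+	limits := sdktrace.NewSpanLimits()
+	limits.AttributeValueLengthLimit = 256 // truncate long attribute values
+	limits.EventCountLimit = 0             // zero disables span events entirely
+	limits.LinkCountLimit = -1             // negative means unlimited links
+	return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
+}
+```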
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits; this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+  To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not again when adding them to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback of the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs; for debugging information, configure it to print V(5) logs (see the sketch after this list). (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
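+
+A hedged sketch of configuring that internal logger, assuming `otel.SetLogger` and the `github.com/go-logr/stdr` adapter; the verbosity value follows the V-level guidance above:
+
+```go
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/stdr"
+	"go.opentelemetry.io/otel"
+)
+
+// enableSDKLogs raises stdr's verbosity so V(1) info logs (or V(5)
+// debug logs) from the SDK and API are actually printed.
+func enableSDKLogs() {
+	stdr.SetVerbosity(5)
+	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
+}
+```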
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporters/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
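+
+A hedged sketch of the new carrier (the propagator choice is illustrative):
+
+```go
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// roundTrip writes the current span context into an in-memory map and
+// reads it back, using MapCarrier as the propagation carrier.
+func roundTrip(ctx context.Context) context.Context {
+	prop := propagation.TraceContext{}
+	carrier := propagation.MapCarrier{}
+	prop.Inject(ctx, carrier)
+	return prop.Extract(ctx, carrier)
+}
+```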
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a `noopMeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
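+
+A hedged sketch of the `WithStackTrace` option referenced above (tracer and span names are illustrative):
+
+```go
+import (
+	"context"
+	"errors"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// doWork records an error event that carries a stack trace.
+func doWork(ctx context.Context) {
+	_, span := otel.Tracer("example.io/instrumentation").Start(ctx, "doWork")
+	defer span.End()
+
+	if err := errors.New("boom"); err != nil {
+		span.RecordError(err, trace.WithStackTrace(true))
+	}
+}
+```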
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - `ValueRecorder` becomes `Histogram`
+  - `ValueObserver` becomes `Gauge`
+  - `SumObserver` becomes `CounterObserver`
+  - `UpDownSumObserver` becomes `UpDownCounterObserver`
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in `resources.go`. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
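+
+A hedged sketch of using it in a test setup (the helper name is illustrative):
+
+```go
+import (
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+// newTestProvider registers a SpanRecorder so finished spans can be
+// asserted on via sr.Ended() after the code under test runs.
+func newTestProvider() (*sdktrace.TracerProvider, *tracetest.SpanRecorder) {
+	sr := tracetest.NewSpanRecorder()
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+	return tp, sr
+}
+```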
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions.  The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`.  Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+      | gRPC Status Code | Description |
+      | ---------------- | ----------- |
+      | 1  | Cancelled |
+      | 4  | Deadline Exceeded |
+      | 8  | Resource Exhausted |
+      | 10 | Aborted |
+      | 11 | Out of Range |
+      | 14 | Unavailable |
+      | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions (see the sketch after this list). (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline.  (#2009)
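+
+A hedged sketch of the `Baggage` API added above (the key and value are illustrative):
+
+```go
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+// withUser attaches a single baggage member to the context using the
+// new Baggage type and its context helpers.
+func withUser(ctx context.Context) (context.Context, error) {
+	m, err := baggage.NewMember("user.id", "42")
+	if err != nil {
+		return ctx, err
+	}
+	b, err := baggage.New(m)
+	if err != nil {
+		return ctx, err
+	}
+	return baggage.ContextWithBaggage(ctx, b), nil
+}
+```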
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- `BatchSpanProcessor` now reports export failures when the `ForceFlush()` method is called. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- `span.End()` now only accepts Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`; use the corresponding `With*()` options to include them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer associated with a span, this method is not appropriate.
+  The `IsRecording` method returns if the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed.  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed.  Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
+- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, which set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky reconnection of the OTLP client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The prometheus.InstallNewPipeline example is moved from comment to example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
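+
+A minimal sketch of the new `RecordError` behavior (illustrative only; the tracer name and error value are placeholders, and spans are no-ops unless a `TracerProvider` has been registered):
+
+```
+package main
+
+import (
+    "context"
+    "errors"
+
+    "go.opentelemetry.io/otel"
+)
+
+func main() {
+    // The Tracer comes from the global TracerProvider (a no-op by default).
+    _, span := otel.Tracer("example").Start(context.Background(), "operation")
+    defer span.End()
+
+    // RecordError records the error as an "exception" event on the span;
+    // it does not change the span status.
+    span.RecordError(errors.New("boom"))
+}
+```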
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting error status while recording error with Span from oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span's `SpanContext`.
+  If needed, that `SpanContext`'s `IsRemote()` method can then be used to determine if it is remote or not (see the sketch after this list). (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, which focuses the exporter configuration on only the endpoints it will send telemetry to. (#1830)
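+
+A minimal sketch of the unified storage (illustrative only; the empty `SpanContext{}` stands in for one extracted from incoming request data):
+
+```
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+    // Store a (placeholder) remote SpanContext; it becomes the current
+    // Span of the returned context, marked as remote.
+    ctx := trace.ContextWithRemoteSpanContext(context.Background(), trace.SpanContext{})
+
+    // Read back the current SpanContext and ask whether it is remote.
+    sc := trace.SpanContextFromContext(ctx)
+    fmt.Println(sc.IsRemote())
+}
+```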
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobuf. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to the tracer provider (see the sketch after this list). (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
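+
+A minimal sketch of the new provider options together with `ForceFlush` (illustrative only; `WithSpanLimits` takes a `SpanLimits` value in the same way, and a real setup would also register span processors):
+
+```
+package main
+
+import (
+    "context"
+
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+    ctx := context.Background()
+
+    // WithSampler configures sampling at construction time.
+    tp := sdktrace.NewTracerProvider(
+        sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.1)),
+    )
+    defer func() { _ = tp.Shutdown(ctx) }()
+
+    // ForceFlush flushes all registered SpanProcessors without shutting down.
+    _ = tp.ForceFlush(ctx)
+}
+```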
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch after this list).
+- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
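+
+A minimal sketch of constructing the now-immutable `SpanContext` from known values (the hex IDs are W3C-style placeholders):
+
+```
+package main
+
+import (
+    "fmt"
+
+    "go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+    tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+    sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
+
+    // With no exported fields, SpanContextConfig is the way to build a
+    // SpanContext where all values are known up front.
+    sc := trace.NewSpanContext(trace.SpanContextConfig{
+        TraceID:    tid,
+        SpanID:     sid,
+        TraceFlags: trace.FlagsSampled,
+    })
+    fmt.Println(sc.IsValid())
+}
+```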
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter; the resource is used instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+   These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface (see the sketch after this list). (#1544)
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+   | OS      | Go Version | Architecture |
+   | ------- | ---------- | ------------ |
+   | Ubuntu  | 1.15       | amd64        |
+   | Ubuntu  | 1.14       | amd64        |
+   | Ubuntu  | 1.15       | 386          |
+   | Ubuntu  | 1.14       | 386          |
+   | MacOS   | 1.15       | amd64        |
+   | MacOS   | 1.14       | amd64        |
+   | Windows | 1.15       | amd64        |
+   | Windows | 1.14       | amd64        |
+   | Windows | 1.15       | 386          |
+   | Windows | 1.14       | 386          |
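+
+A minimal sketch of the `HeaderCarrier` adaption and its `Keys()` method (illustrative only; the header value is a placeholder):
+
+```
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+    h := http.Header{}
+    h.Set("Traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
+
+    // HeaderCarrier adapts http.Header to the TextMapCarrier interface;
+    // Keys reports the keys present in the carrier.
+    carrier := propagation.HeaderCarrier(h)
+    fmt.Println(carrier.Keys())
+}
+```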
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+  `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+      "otel/exporters/otlp/otlphttp".Option
+      "otel/exporters/stdout".Option
+      "otel/oteltest".Option
+      "otel/trace".TracerOption
+      "otel/trace".SpanOption
+      "otel/trace".EventOption
+      "otel/trace".LifeCycleOption
+      "otel/trace".InstrumentationOption
+      "otel/sdk/resource".Option
+      "otel/sdk/trace".ParentBasedSamplerOption
+      "otel/sdk/trace".ReadOnlySpan
+      "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+   This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added CodeQL workflow to GitHub Actions. (#1428)
+- Added Gosec workflow to GitHub Actions. (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `TraceState` from the parent `SpanContext`. (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext` (see the sketch after this list). (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
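+
+A minimal sketch of `TraceContext` propagation (illustrative only; with no active span in the source context `Inject` writes nothing, and `tracestate` rides along whenever the `SpanContext` carries one):
+
+```
+package main
+
+import (
+    "context"
+    "net/http"
+
+    "go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+    prop := propagation.TraceContext{}
+    header := http.Header{}
+
+    // Inject writes traceparent (and tracestate, when set) from the
+    // context's SpanContext into the carrier; Extract reads them back.
+    prop.Inject(context.Background(), propagation.HeaderCarrier(header))
+    ctx := prop.Extract(context.Background(), propagation.HeaderCarrier(header))
+    _ = ctx
+}
+```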
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified. (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine the Zipkin span status. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+   This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s (see the sketch after this list). (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
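+
+A minimal sketch of the updated `AddEvent` and `RecordError` signatures (written with current package names; the `attribute` package shown here is the later rename of `label`, and the event name and attribute are placeholders):
+
+```
+package main
+
+import (
+    "context"
+    "errors"
+    "time"
+
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+    _, span := otel.Tracer("example").Start(context.Background(), "operation")
+    defer span.End()
+
+    // No context argument: a required name plus variadic EventOptions.
+    span.AddEvent("cache.miss", trace.WithAttributes(attribute.String("key", "user:42")))
+
+    // No context argument: a required error plus variadic EventOptions.
+    span.RecordError(errors.New("boom"), trace.WithTimestamp(time.Now()))
+}
+```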
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+   It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+   `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add Shutdown method to sdk/trace/provider, shutting down processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+   They now are `Unset`, `Error`, and `Ok`.
+   They no longer track the gRPC codes (see the sketch after this list). (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
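+
+A minimal sketch of the updated status codes (written against the current top-level package layout; at this version the tracer still came from the `global` package, and the status description is a placeholder):
+
+```
+package main
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/codes"
+)
+
+func main() {
+    _, span := otel.Tracer("example").Start(context.Background(), "operation")
+    defer span.End()
+
+    // Only Unset, Error, and Ok remain; gRPC codes are no longer mirrored.
+    span.SetStatus(codes.Error, "downstream call failed")
+}
+```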
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+   The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+   This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add reconnecting UDP connection type to Jaeger exporter.
+   This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+   It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+   This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+   This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+    This move reflects the OpenTelemetry specification for these propagators and cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+   The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span (see the sketch after this list). (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+   recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+   Style Guide to reflect this styling rule. (#1172)
+- Don't consider unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
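+
+A minimal sketch of the renamed samplers (written against the current SDK API; option names such as `WithSampler` and the `NewTracerProvider` constructor arrived in later releases):
+
+```
+package main
+
+import (
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+    // TraceIDRatioBased ignores the parent's sampling decision on its own;
+    // wrapping it in ParentBased defers to a parent span when one exists.
+    sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))
+
+    tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
+    _ = tp
+}
+```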
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+   `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+   This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+   Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
+   This interface still supports the export of `SpanData`, but only as a slice.
+   Implementations are also now required to return any error from `ExportSpans` and to implement a `Shutdown` method for exporter clean-up (see the sketch after this list). (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+   If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+   This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
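+
+A minimal sketch of the specification-compliant exporter contract (written against the current `sdk/trace` names; at this version the interface lived in `sdk/export/trace` and exported `SpanData`):
+
+```
+package main
+
+import (
+    "context"
+
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// sliceExporter shows the required shape: spans arrive as a slice, any
+// export error must be returned, and Shutdown handles clean-up.
+type sliceExporter struct{}
+
+func (sliceExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
+    return nil // deliver the batch to a backend here, returning any failure
+}
+
+func (sliceExporter) Shutdown(ctx context.Context) error {
+    return nil // release resources; honor ctx cancellation in real code
+}
+
+func main() {
+    var _ sdktrace.SpanExporter = sliceExporter{}
+}
+```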
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+   Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+   This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+   The functionality this method provided was limited compared to what a user can provide themselves.
+   It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+   These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+    These functions respectively build a new exporter with default SDK options and register the exporter with the `global` package. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+   This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+   This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+   References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+   Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add http content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+   This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+   If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+   Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+   This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+   The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+   If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+   This removes the behavior of changing the debug flag into a set sampling bit.
+   Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+   This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+   This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+   This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+   There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler` (see the sketch after this list). (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
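+
+A minimal sketch of handling an error through the global `Handler` (using the current top-level name `otel.Handle`; at this version the function lived in the `global` package described above, and the error is a placeholder):
+
+```
+package main
+
+import (
+    "errors"
+
+    "go.opentelemetry.io/otel"
+)
+
+func main() {
+    // Handle forwards the error to the globally registered Handler;
+    // the default implementation logs it.
+    otel.Handle(errors.New("telemetry export failed"))
+}
+```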
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+   With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+   All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+   Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling to replace one-off error handlers with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify custom http client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include a section for compatible libraries and a reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now-unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+   This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+   This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+   Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+   The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on third-party infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+   The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+   This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+   The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+   This resulted in all subsequent releases not becoming the default latest.
+   A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+   Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+   Consequentially, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+   This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+   This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project-specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Renames `circle-ci` target to the more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+   Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs. `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+   This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+   This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- `core.Value` now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing them. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for tracing and metrics.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
+[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
+[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
+[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
+[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
+[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
+[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
+[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
+[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
+[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
+[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
+[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
+[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
+[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
+[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
+[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
+[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
+[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
+[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
+[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
+[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
+[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
+[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
+[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
+[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
+[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
+[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
+[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
+[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
+[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
+[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
+[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
+[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
+[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
+[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
+[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
+[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
+[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
+[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
+[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
+[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
+[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
+[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
+[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
+[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
+[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
+[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
+[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
+[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
+[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
+[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
+[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
+[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
+[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
+[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
+[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
+[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
new file mode 100644
index 000000000..c4012ed6c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -0,0 +1,17 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+#  https://github.com/open-telemetry/community/blob/main/community-membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+#  https://help.github.com/en/articles/about-code-owners
+#
+
+* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+
+CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 000000000..a6928bfdf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,526 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` instead of `go test` to run the tests.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean` then it means that everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print a warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of a redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This puts the project in an `opentelemetry-go` directory in your
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered to be **ready to merge** when:
+
+* It has received two approvals from Collaborators/Maintainers (at
+  different companies). This is not enforced through technical means
+  and a PR may be **ready to merge** with a single approval if the change
+  and its approach have been discussed and consensus reached.
+* Feedback has been addressed.
+* Any substantive changes to your PR will require that you clear any prior
+  approval reviews; this includes changes resulting from other feedback. Unless
+  the approver explicitly stated that their approval will persist across
+  changes, it should be assumed that the PR needs their review again. Other
+  project members (e.g. approvers, maintainers) can help with this if there are
+  any questions or if you forget to clear reviews.
+* It has been open for review for at least one working day. This gives
+  people reasonable time to review.
+* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
+  one day and may be merged with a single Maintainer's approval.
+* `CHANGELOG.md` has been updated to reflect what has been
+  added, changed, removed, or fixed.
+* `README.md` has been updated if necessary.
+* An urgent fix can be an exception as long as it has been actively
+  communicated.
+
+Any Maintainer can merge the PR once it is **ready to merge**.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
+
+It's especially valuable to read through the [library
+guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conform to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Documentation
+
+Each non-example Go module should have its own `README.md` containing:
+
+- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
+- Brief description.
+- Installation instructions (and requirements if applicable).
+- Hyperlink to an example. Depending on the component the example can be:
+  - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
+  - A sample Go application with its own `README.md`, like [here](example/zipkin).
+- Additional documentation sections such as:
+  - Configuration,
+  - Contributing,
+  - References.
+
+[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
+
+Moreover, it should be possible to navigate to any `README.md` from the
+root `README.md`.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you
+plan to submit. This check must pass for your changes to be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not, documentation explaining the reasons why must
+be included.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the
+strong type system of Go restricts the function design options. There are a
+few ways to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config`s in the package. This type must contain the configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used outside of the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config`s are not shared across package
+boundaries, meaning a `config` from one package should not be directly used
+by another. The one exception is the API packages. The configs from the base
+API, e.g. `go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be
+consumed by the SDK, and it is therefore expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods.
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function applies any default settings and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this function can
+return an error as well, which is expected to be handled by the
+instantiation function or propagated to the user.
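+
+As a hedged sketch of that variant (the error-returning signature and the
+`validate` helper are assumptions for illustration, not project APIs):
+
+```go
+// newConfig returns a configured config, or an error if validation fails.
+func newConfig(options ...Option) (config, error) {
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	if err := config.validate(); err != nil { // hypothetical validation helper
+		return config, err
+	}
+	return config, nil
+}
+```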
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or in the
+special case of boolean options, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function:
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
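+
+For example, a minimal sketch (the `name` parameter and the fields of `T`
+are assumptions for illustration):
+
+```go
+// NewT returns a T configured by the passed options.
+func NewT(name string, options ...Option) T {
+	cfg := newConfig(options...)
+	return T{name: name, config: cfg}
+}
+```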
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct`s that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog    {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
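+
+A short usage sketch of the above (the argument values are illustrative):
+
+```go
+dog := NewDog("rex", WithWeight(20), WithFurColor("brown"))
+bird := NewBird("robin", WithWeight(0.05), WithMaxAltitude(1000))
+```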
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is self-documentation through named method parameters. Therefore,
+where appropriate, methods of every exported interface type should have
+their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+If new functionality is needed for an interface that cannot be changed, it
+MUST be added by including an additional interface. That added interface can
+be a simple interface for the specific functionality that you want to add or
+it can be a super-set of the original interface. For example, if you wanted
+to add a `Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+// ClosingExporter is a super-set of Exporter that can also release
+// resources. Close is a func-typed field here because a struct cannot
+// declare a bare method signature.
+type ClosingExporter struct {
+	Exporter
+	Close func()
+}
+```
+
+This new type can be used similarly to the simple interface above, in that
+a passed `Exporter` value can be asserted to be a `ClosingExporter` and its
+`Close` called.
+
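+As a minimal usage sketch (assuming the types above):
+
+```go
+func caller(e Exporter) {
+	// assert the concrete super-set type and close if supported
+	if ce, ok := e.(ClosingExporter); ok {
+		ce.Close()
+	}
+}
+```
+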
+This super-set approach can be useful if there is explicit behavior that
+needs to be coupled with the original type and passed as a unified type to
+a new function, but, because of this coupling, it also limits the
+applicability of the added functionality. If there exist other interfaces
+where this functionality should be added, each one will need its own
+super-set interface, duplicating the pattern. For this reason, the simple
+targeted interface that defines the specific functionality should be
+preferred.
+
+## Approvers and Maintainers
+
+Approvers:
+
+- [Evan Torrie](https://github.com/evantorrie), Verizon Media
+- [Josh MacDonald](https://github.com/jmacd), LightStep
+- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
+- [David Ashpole](https://github.com/dashpole), Google
+- [Robert Pająk](https://github.com/pellared), Splunk
+- [Chester Cheung](https://github.com/hanyuancheung), Tencent
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
+
+Maintainers:
+
+- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+- [Tyler Yahn](https://github.com/MrAlias), Splunk
+
+Emeritus:
+
+- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
+
+### Become an Approver or a Maintainer
+
+See the [community membership document in OpenTelemetry community
+repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
new file mode 100644
index 000000000..0e6ffa284
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -0,0 +1,227 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TOOLS_MOD_DIR := ./internal/tools
+
+ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
+ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+
+GO = go
+TIMEOUT = 60
+
+.DEFAULT_GOAL := precommit
+
+.PHONY: precommit ci
+precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
+ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
+
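+# Typical local usage (a sketch; all targets are defined below):
+#
+#   make precommit   # default goal: runs the standard fix-ups, lint, and tests
+#   make test-race   # runs race-enabled tests in every module
+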
+# Tools
+
+TOOLS = $(CURDIR)/.tools
+
+$(TOOLS):
+	@mkdir -p $@
+$(TOOLS)/%: | $(TOOLS)
+	cd $(TOOLS_MOD_DIR) && \
+	$(GO) build -o $@ $(PACKAGE)
+
+MULTIMOD = $(TOOLS)/multimod
+$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
+
+SEMCONVGEN = $(TOOLS)/semconvgen
+$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
+
+CROSSLINK = $(TOOLS)/crosslink
+$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
+
+SEMCONVKIT = $(TOOLS)/semconvkit
+$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
+
+DBOTCONF = $(TOOLS)/dbotconf
+$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
+
+GOLANGCI_LINT = $(TOOLS)/golangci-lint
+$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
+
+MISSPELL = $(TOOLS)/misspell
+$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
+
+GOCOVMERGE = $(TOOLS)/gocovmerge
+$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
+
+STRINGER = $(TOOLS)/stringer
+$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
+
+PORTO = $(TOOLS)/porto
+$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
+
+GOJQ = $(TOOLS)/gojq
+$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
+
+.PHONY: tools
+tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
+
+# Build
+
+.PHONY: generate build
+
+generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
+generate/%: DIR=$*
+generate/%: | $(STRINGER) $(PORTO)
+	@echo "$(GO) generate $(DIR)/..." \
+		&& cd $(DIR) \
+		&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
+
+build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
+build/%: DIR=$*
+build/%:
+	@echo "$(GO) build $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) build ./...
+
+build-tests/%: DIR=$*
+build-tests/%:
+	@echo "$(GO) build tests $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+.PHONY: $(TEST_TARGETS) test
+test-default test-race: ARGS=-race
+test-bench:   ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
+test-short:   ARGS=-short
+test-verbose: ARGS=-v -race
+$(TEST_TARGETS): test
+test: $(OTEL_GO_MOD_DIRS:%=test/%)
+test/%: DIR=$*
+test/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
+
+COVERAGE_MODE    = atomic
+COVERAGE_PROFILE = coverage.out
+.PHONY: test-coverage
+test-coverage: | $(GOCOVMERGE)
+	@set -e; \
+	printf "" > coverage.txt; \
+	for dir in $(ALL_COVERAGE_MOD_DIRS); do \
+	  echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
+	  (cd "$${dir}" && \
+	    $(GO) list ./... \
+	    | grep -v third_party \
+	    | grep -v 'semconv/v.*' \
+	    | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
+	  $(GO) tool cover -html=coverage.out -o coverage.html); \
+	done; \
+	$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
+
+.PHONY: golangci-lint golangci-lint-fix
+golangci-lint-fix: ARGS=--fix
+golangci-lint-fix: golangci-lint
+golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
+golangci-lint/%: DIR=$*
+golangci-lint/%: | $(GOLANGCI_LINT)
+	@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
+		&& cd $(DIR) \
+		&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
+
+.PHONY: crosslink
+crosslink: | $(CROSSLINK)
+	@echo "Updating intra-repository dependencies in all go modules" \
+		&& $(CROSSLINK) --root=$(shell pwd) --prune
+
+.PHONY: go-mod-tidy
+go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
+go-mod-tidy/%: DIR=$*
+go-mod-tidy/%: | crosslink
+	@echo "$(GO) mod tidy in $(DIR)" \
+		&& cd $(DIR) \
+		&& $(GO) mod tidy -compat=1.18
+
+.PHONY: lint-modules
+lint-modules: go-mod-tidy
+
+.PHONY: lint
+lint: misspell lint-modules golangci-lint
+
+.PHONY: vanity-import-check
+vanity-import-check: | $(PORTO)
+	@$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
+
+.PHONY: vanity-import-fix
+vanity-import-fix: | $(PORTO)
+	@$(PORTO) --include-internal -w .
+
+.PHONY: misspell
+misspell: | $(MISSPELL)
+	@$(MISSPELL) -w $(ALL_DOCS)
+
+.PHONY: license-check
+license-check:
+	@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
+	           awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+	   done); \
+	   if [ -n "$${licRes}" ]; then \
+	           echo "license header checking failed:"; echo "$${licRes}"; \
+	           exit 1; \
+	   fi
+
+DEPENDABOT_CONFIG = .github/dependabot.yml
+.PHONY: dependabot-check
+dependabot-check: | $(DBOTCONF)
+	@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
+
+.PHONY: dependabot-generate
+dependabot-generate: | $(DBOTCONF)
+	@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
+
+.PHONY: check-clean-work-tree
+check-clean-work-tree:
+	@if ! git diff --quiet; then \
+	  echo; \
+	  echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
+	  echo; \
+	  git status; \
+	  exit 1; \
+	fi
+
+SEMCONVPKG ?= "semconv/"
+.PHONY: semconv-generate
+semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
+	[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
+	[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
+	$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+
+.PHONY: prerelease
+prerelease: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
+
+COMMIT ?= "HEAD"
+.PHONY: add-tags
+add-tags: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
new file mode 100644
index 000000000..878d87e58
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -0,0 +1,114 @@
+# OpenTelemetry-Go
+
+[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
+[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
+
+OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal  | Status     | Project |
+| ------- | ---------- | ------- |
+| Traces  | Stable     | N/A     |
+| Metrics | Alpha      | N/A     |
+| Logs    | Frozen [1] | N/A     |
+
+- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
+   No Logs Pull Requests are currently being accepted.
+
+Progress and status specific to this repository are tracked in our local
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](./VERSIONING.md).
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the currently supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+  supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+  testing for the oldest (now archived upstream) version of Go. This, and
+  future, releases of opentelemetry-go may include features only supported by
+  the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS      | Go Version | Architecture |
+| ------- | ---------- | ------------ |
+| Ubuntu  | 1.20       | amd64        |
+| Ubuntu  | 1.19       | amd64        |
+| Ubuntu  | 1.18       | amd64        |
+| Ubuntu  | 1.20       | 386          |
+| Ubuntu  | 1.19       | 386          |
+| Ubuntu  | 1.18       | 386          |
+| MacOS   | 1.20       | amd64        |
+| MacOS   | 1.19       | amd64        |
+| MacOS   | 1.18       | amd64        |
+| Windows | 1.20       | amd64        |
+| Windows | 1.19       | amd64        |
+| Windows | 1.18       | amd64        |
+| Windows | 1.20       | 386          |
+| Windows | 1.19       | 386          |
+| Windows | 1.18       | 386          |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application,
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides or want
+to build your own instrumentation for your application directly, you will need
+to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
+
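+As a rough sketch of manual instrumentation with the otel package (the tracer
+name, span name, and attribute below are illustrative placeholders):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func doWork(ctx context.Context) {
+	// Acquire a Tracer from the globally registered TracerProvider.
+	tracer := otel.Tracer("example.com/myapp")
+
+	// Start a span, do the work, then end the span.
+	ctx, span := tracer.Start(ctx, "doWork")
+	defer span.End()
+
+	span.SetAttributes(attribute.String("work.kind", "example"))
+	_ = ctx // pass ctx on so downstream spans nest under this one
+}
+```
+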
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Metrics | Traces |
+| :-----------------------------------: | :-----: | :----: |
+| [Jaeger](./exporters/jaeger/)         |         | ✓      |
+| [OTLP](./exporters/otlp/)             | ✓       | ✓      |
+| [Prometheus](./exporters/prometheus/) | ✓       |        |
+| [stdout](./exporters/stdout/)         | ✓       | ✓      |
+| [Zipkin](./exporters/zipkin/)         |         | ✓      |
+
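+For example, a minimal sketch of an export pipeline using the stdout trace
+exporter (error handling and lifecycle management are abbreviated):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Exporter that prints completed spans to stdout.
+	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// TracerProvider that batches spans and hands them to the exporter.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	otel.SetTracerProvider(tp)
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+
+	// Application code creates spans via otel.Tracer(...) from here on.
+}
+```
+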
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 000000000..77d56c936
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,127 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Check out a local copy of the [OpenTelemetry specification] at the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.13.0" # Change to the release version you are generating.
+export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
+git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+**Note**, the generation code was changed to generate versions >= 1.13.
+To generate versions prior to this, check out the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`.  Commit this change to a new branch.
+
+Update `go.mod` for submodules to depend on the new release; this will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+    `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+    ```
+    make prerelease MODSET=<module set>
+    ```
+
+2. Verify the changes.
+
+    ```
+    git diff ...prerelease_<module set>_<new tag>
+    ```
+
+    This should have changed the version for all modules to be `<new tag>`.
+    If these changes look correct, merge them into your pre-release branch:
+
+    ```
+    git merge prerelease_<module set>_<new tag>
+    ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+       To verify this, you can look directly at the commits since the `<last tag>`.
+
+       ```
+       git --no-pager log --pretty=oneline "<last tag>..HEAD"
+       ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+    Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies that are tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+    using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
+
+    ```
+    make add-tags MODSET=<module set> COMMIT=<commit hash>
+    ```
+
+    It should only be necessary to provide an explicit `COMMIT` value if the
+    current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+    Make sure you push all sub-modules as well.
+
+    ```
+    git push upstream <new tag>
+    git push upstream <submodules-path/new tag>
+    ...
+    ```
+
+## Release
+
+Finally, create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..412f1e362
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+    * Versions will comply with [semver
+      2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+      * New methods may be added to exported API interfaces. All exported
+        interfaces that fall within this exception will include the following
+        paragraph in their public documentation.
+
+        > Warning: methods may be added to this interface in minor releases.
+
+    * If a module is version `v2` or higher, the major version of the module
+      must be included as a `/vN` at the end of the module paths used in
+      `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+      go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+      (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+      paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/otel/v2@v2.0.1`.  Note there is both a `/v2` and a
+      `@v2.0.1` in that example. One way to think about it is that the module
+      name now includes the `/v2`, so include `/v2` whenever you are using the
+      module name).
+    * If a module is version `v0` or `v1`, do not include the major version in
+      either the module path or the import path.
+  * Modules will be used to encapsulate signals and components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API will be versioned
+      with a major version greater than `v0`.
+      * The decision to make a module stable will be made on a case-by-case
+        basis by the maintainers of this project.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+    * All stable modules that use the same major version number will use the
+      same entire version number.
+      * Stable modules may be released with an incremented minor or patch
+        version even though that module has not been changed, but rather so
+        that it will remain at the same version as other stable modules that
+        did undergo change.
+      * When an experimental module becomes stable a new stable module version
+        will be released and will include this now stable module. The new
+        stable module version will be an increment of the minor version number
+        and will be applied to all existing stable modules as well as the newly
+        stable module being released.
+* Versioning of the associated [contrib
+  repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+  this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+    * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`.  Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+    * Experimental modules still under active development will be versioned at
+      `v0` to imply the stability guarantee defined by
+      [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+      > Major version zero (0.y.z) is for initial development. Anything MAY
+      > change at any time. The public API SHOULD NOT be considered stable.
+
+    * Mature modules for which we guarantee a stable public API and telemetry will
+      be versioned with a major version greater than `v0`.
+    * Experimental modules will start their versioning at `v0.0.0` and will
+      increment their minor version when backwards incompatible changes are
+      released and increment their patch version when backwards compatible
+      changes are released.
+    * Stable contrib modules cannot depend on experimental modules from this
+      project.
+    * All stable contrib modules that share a major version with this project
+      will use the same entire version number as this project.
+      * Stable modules may be released with an incremented minor or patch
+        version even though that module's code has not been changed. Instead,
+        the only change included is an update to that module's dependency on
+        this project's stable APIs.
+      * When an experimental module in contrib becomes stable a new stable
+        module version will be released and will include this now stable
+        module. The new stable module version will be an increment of the minor
+        version number and will be applied to all existing stable contrib
+        modules, this project's modules, and the newly stable module being
+        released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+Development has reached a point where the `otel/trace`, `otel/baggage`, and
+`otel/sdk/trace` modules should be considered for a stable release. The
+`otel/metric` and `otel/sdk/metric` modules are still under active
+development, and the `otel` module
+depends on both `otel/trace` and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable
+release. They are then released as version `v1.1.0` (the minor version is
+incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 000000000..dafe7424d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 000000000..fe2bc5766
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size that most attribute encodings will not allocate new
+		// memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','.  Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary.  Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder, preferably in init() or in a var
+// definition.
+func NewEncoderID() EncoderID {
+	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
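+//
+// A brief sketch of the resulting encoding (keys are iterated in sorted
+// order):
+//
+//	set := NewSet(String("a", "1,2"), Bool("b", true))
+//	enc := set.Encoded(DefaultEncoder()) // `a=1\,2,b=true`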
+func DefaultEncoder() Encoder {
+	defaultEncoderOnce.Do(func() {
+		defaultEncoderInstance = &defaultAttrEncoder{
+			pool: sync.Pool{
+				New: func() interface{} {
+					return &bytes.Buffer{}
+				},
+			},
+		}
+	})
+	return defaultEncoderInstance
+}
+
+// Encode is a part of an implementation of the Encoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+	buf := d.pool.Get().(*bytes.Buffer)
+	defer d.pool.Put(buf)
+	buf.Reset()
+
+	for iter.Next() {
+		i, keyValue := iter.IndexedAttribute()
+		if i > 0 {
+			_, _ = buf.WriteRune(',')
+		}
+		copyAndEscape(buf, string(keyValue.Key))
+
+		_, _ = buf.WriteRune('=')
+
+		if keyValue.Value.Type() == STRING {
+			copyAndEscape(buf, keyValue.Value.AsString())
+		} else {
+			_, _ = buf.WriteString(keyValue.Value.Emit())
+		}
+	}
+	return buf.String()
+}
+
+// ID is a part of an implementation of the Encoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+	return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+	for _, ch := range val {
+		switch ch {
+		case '=', ',', escapeChar:
+			_, _ = buf.WriteRune(escapeChar)
+		}
+		_, _ = buf.WriteRune(ch)
+	}
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`.  Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+	return id.value != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
new file mode 100644
index 000000000..841b271fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -0,0 +1,161 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Iterator allows iterating over the set of attributes in order, sorted by
+// key.
+type Iterator struct {
+	storage *Set
+	idx     int
+}
+
+// MergeIterator supports iterating over two sets of attributes while
+// eliminating duplicate values from the combined set. The first iterator
+// value takes precedence.
+type MergeIterator struct {
+	one     oneIterator
+	two     oneIterator
+	current KeyValue
+}
+
+type oneIterator struct {
+	iter Iterator
+	done bool
+	attr KeyValue
+}
+
+// Next moves the iterator to the next position. Returns false if there are no
+// more attributes.
+func (i *Iterator) Next() bool {
+	i.idx++
+	return i.idx < i.Len()
+}
+
+// Label returns the current KeyValue. It must be called only after Next
+// returns true.
+//
+// Deprecated: Use Attribute instead.
+func (i *Iterator) Label() KeyValue {
+	return i.Attribute()
+}
+
+// Attribute returns the current KeyValue of the Iterator. It must be called
+// only after Next returns true.
+func (i *Iterator) Attribute() KeyValue {
+	kv, _ := i.storage.Get(i.idx)
+	return kv
+}
+
+// IndexedLabel returns the current index and attribute. It must be called
+// only after Next returns true.
+//
+// Deprecated: Use IndexedAttribute instead.
+func (i *Iterator) IndexedLabel() (int, KeyValue) {
+	return i.idx, i.Attribute()
+}
+
+// IndexedAttribute returns the current index and attribute. It must be
+// called only after Next returns true.
+func (i *Iterator) IndexedAttribute() (int, KeyValue) {
+	return i.idx, i.Attribute()
+}
+
+// Len returns the number of attributes in the iterated set.
+func (i *Iterator) Len() int {
+	return i.storage.Len()
+}
+
+// ToSlice is a convenience function that creates a slice of attributes from
+// the passed iterator. The iterator is set up to start from the beginning
+// before creating the slice.
+func (i *Iterator) ToSlice() []KeyValue {
+	l := i.Len()
+	if l == 0 {
+		return nil
+	}
+	i.idx = -1
+	slice := make([]KeyValue, 0, l)
+	for i.Next() {
+		slice = append(slice, i.Attribute())
+	}
+	return slice
+}
+
+// NewMergeIterator returns a MergeIterator for merging two attribute sets.
+// Duplicates are resolved by taking the value from the first set.
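+//
+// A brief usage sketch, where s1 and s2 are placeholder Sets:
+//
+//	mi := NewMergeIterator(&s1, &s2)
+//	for mi.Next() {
+//		kv := mi.Attribute() // s1's value wins for duplicate keys
+//		_ = kv
+//	}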
+func NewMergeIterator(s1, s2 *Set) MergeIterator {
+	mi := MergeIterator{
+		one: makeOne(s1.Iter()),
+		two: makeOne(s2.Iter()),
+	}
+	return mi
+}
+
+func makeOne(iter Iterator) oneIterator {
+	oi := oneIterator{
+		iter: iter,
+	}
+	oi.advance()
+	return oi
+}
+
+func (oi *oneIterator) advance() {
+	if oi.done = !oi.iter.Next(); !oi.done {
+		oi.attr = oi.iter.Attribute()
+	}
+}
+
+// Next returns true if there is another attribute available.
+func (m *MergeIterator) Next() bool {
+	if m.one.done && m.two.done {
+		return false
+	}
+	if m.one.done {
+		m.current = m.two.attr
+		m.two.advance()
+		return true
+	}
+	if m.two.done {
+		m.current = m.one.attr
+		m.one.advance()
+		return true
+	}
+	if m.one.attr.Key == m.two.attr.Key {
+		m.current = m.one.attr // first iterator attribute value wins
+		m.one.advance()
+		m.two.advance()
+		return true
+	}
+	if m.one.attr.Key < m.two.attr.Key {
+		m.current = m.one.attr
+		m.one.advance()
+		return true
+	}
+	m.current = m.two.attr
+	m.two.advance()
+	return true
+}
+
+// Label returns the current value after Next() returns true.
+//
+// Deprecated: Use Attribute instead.
+func (m *MergeIterator) Label() KeyValue {
+	return m.current
+}
+
+// Attribute returns the current value after Next() returns true.
+func (m *MergeIterator) Attribute() KeyValue {
+	return m.current
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
new file mode 100644
index 000000000..0656a04e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -0,0 +1,134 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Key represents the key part in key-value pairs. It's a string. The
+// allowed character set in the key depends on the use of the key.
+type Key string
+
+// Bool creates a KeyValue instance with a BOOL Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Bool(name, value).
+func (k Key) Bool(v bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolValue(v),
+	}
+}
+
+// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- BoolSlice(name, value).
+func (k Key) BoolSlice(v []bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolSliceValue(v),
+	}
+}
+
+// Int creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntValue(v),
+	}
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntSliceValue(v),
+	}
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64Value(v),
+	}
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64SliceValue(v),
+	}
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 000000000..1ddf3ce05
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+	Key   Key
+	Value Value
+}
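+
+// A KeyValue is typically built with the package-level helpers below or the
+// methods on Key; for example (sketch):
+//
+//	kv := String("user.name", "jane") // same as Key("user.name").String("jane")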
+
+// Valid reports whether kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+	return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+	return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+	return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+	return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+	return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+	return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+	return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+	return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+	return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+	return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+	return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
+func Stringer(k string, v fmt.Stringer) KeyValue {
+	return Key(k).String(v.String())
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 000000000..26be59832
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,424 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"encoding/json"
+	"reflect"
+	"sort"
+)
+
+type (
+	// Set is the representation for a distinct attribute set. It manages an
+	// immutable set of attributes, with an internal cache for storing
+	// attribute encodings.
+	//
+	// This type supports the Equivalent method of comparison using values of
+	// type Distinct.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct wraps a variable-size array of KeyValue, constructed with keys
+	// in sorted order. This can be used as a map key or for equality checking
+	// between Sets.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Filter supports removing certain attributes from attribute sets. When
+	// the filter returns true, the attribute will be kept in the filtered
+	// attribute set. When the filter returns false, the attribute is excluded
+	// from the filtered attribute set, and the attribute instead appears in
+	// the removed list of excluded attributes.
+	Filter func(KeyValue) bool
+
+	// Sortable implements sort.Interface, used for sorting KeyValue. This is
+	// an exported type to support a memory optimization. A pointer to one of
+	// these is needed for the call to sort.Stable(), which the caller may
+	// provide in order to avoid an allocation. See NewSetWithSortable().
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided so callers can avoid allocating an empty Set.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+	if l == nil {
+		return Value{}, false
+	}
+	rValue := l.equivalent.reflectValue()
+	vlen := rValue.Len()
+
+	idx := sort.Search(vlen, func(idx int) bool {
+		return rValue.Index(idx).Interface().(KeyValue).Key >= k
+	})
+	if idx >= vlen {
+		return Value{}, false
+	}
+	keyValue := rValue.Index(idx).Interface().(KeyValue)
+	if k == keyValue.Key {
+		return keyValue.Value, true
+	}
+	return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+	if l == nil {
+		return false
+	}
+	_, ok := l.Value(k)
+	return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+	return Iterator{
+		storage: l,
+		idx:     -1,
+	}
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+	iter := l.Iter()
+	return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct type
+// guarantees that the result will equal the equivalent Distinct value of any
+// attribute set with the same elements as this, where sets are made unique by
+// choosing the last value in the input for any given key.
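+//
+// Because Distinct values are comparable, they can serve as map keys, for
+// example (sketch, where set is a previously built Set):
+//
+//	cache := map[Distinct]string{}
+//	cache[set.Equivalent()] = set.Encoded(DefaultEncoder())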
+func (l *Set) Equivalent() Distinct {
+	if l == nil || !l.equivalent.Valid() {
+		return emptySet.equivalent
+	}
+	return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+	return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+	if l == nil || encoder == nil {
+		return ""
+	}
+
+	return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+	return Set{
+		equivalent: emptySet.equivalent,
+	}
+}
+
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
+	return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty()
+	}
+	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+	return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attribute keys from the
+// return value. Excluded keys are returned as a slice of attribute values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+	return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
+}
+
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value.  This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+// - allocating a Sortable for use as a temporary in this method
+// - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
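+//
+// For example (sketch): NewSet(String("k", "a"), String("k", "b")) yields a
+// one-element Set in which key "k" has the value "b".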
+func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+
+	*tmp = kvs
+
+	// Stable sort so the following de-duplication can implement
+	// last-value-wins semantics.
+	sort.Stable(tmp)
+
+	*tmp = nil
+
+	position := len(kvs) - 1
+	offset := position - 1
+
+	// The requirements stated above require that the stable
+	// result be placed in the end of the input slice, while
+	// overwritten values are swapped to the beginning.
+	//
+	// De-duplicate with last-value-wins semantics.  Preserve
+	// duplicate values at the beginning of the input slice.
+	for ; offset >= 0; offset-- {
+		if kvs[offset].Key == kvs[position].Key {
+			continue
+		}
+		position--
+		kvs[offset], kvs[position] = kvs[position], kvs[offset]
+	}
+	if filter != nil {
+		return filterSet(kvs[position:], filter)
+	}
+	return Set{
+		equivalent: computeDistinct(kvs[position:]),
+	}, nil
+}
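+
+// For illustration only (not part of the upstream API surface): under the
+// last-value-wins semantics described above, the final value provided for a
+// duplicated key is the one that is kept. A minimal sketch, assuming this
+// package's String helper:
+//
+//	s := NewSet(String("k", "a"), String("k", "b"))
+//	// s is equivalent to NewSet(String("k", "b")).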
+
+// filterSet reorders kvs so that included keys are contiguous at the end of
+// the slice, while excluded keys precede the included keys.
+func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	var excluded []KeyValue
+
+	// Move attributes that do not match the filter so they're adjacent before
+	// calling computeDistinct().
+	distinctPosition := len(kvs)
+
+	// Swap indistinct keys forward and distinct keys toward the
+	// end of the slice.
+	offset := len(kvs) - 1
+	for ; offset >= 0; offset-- {
+		if filter(kvs[offset]) {
+			distinctPosition--
+			kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
+			continue
+		}
+	}
+	excluded = kvs[:distinctPosition]
+
+	return Set{
+		equivalent: computeDistinct(kvs[distinctPosition:]),
+	}, excluded
+}
+
+// Filter returns a filtered copy of this Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+	if re == nil {
+		return Set{
+			equivalent: l.equivalent,
+		}, nil
+	}
+
+	// Note: This could be refactored to avoid the temporary slice
+	// allocation, if it proves to be expensive.
+	return filterSet(l.ToSlice(), re)
+}
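+
+// For illustration only: a minimal sketch of filtering a Set, assuming this
+// package's String helper and its Filter predicate type (func(KeyValue) bool):
+//
+//	s := NewSet(String("keep", "1"), String("drop", "2"))
+//	kept, dropped := s.Filter(func(kv KeyValue) bool { return kv.Key == "keep" })
+//	// kept contains keep=1; dropped holds the excluded drop=2 attribute.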
+
+// computeDistinct returns a Distinct using either the fixed- or
+// reflect-oriented code path, depending on the size of the input. The input
+// slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+	iface := computeDistinctFixed(kvs)
+	if iface == nil {
+		iface = computeDistinctReflect(kvs)
+	}
+	return Distinct{
+		iface: iface,
+	}
+}
+
+// computeDistinctFixed computes a Distinct for small slices. It returns nil
+// if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+	switch len(kvs) {
+	case 1:
+		ptr := new([1]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 2:
+		ptr := new([2]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 3:
+		ptr := new([3]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 4:
+		ptr := new([4]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 5:
+		ptr := new([5]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 6:
+		ptr := new([6]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 7:
+		ptr := new([7]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 8:
+		ptr := new([8]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 9:
+		ptr := new([9]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 10:
+		ptr := new([10]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	default:
+		return nil
+	}
+}
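+
+// For illustration only: fixed-size arrays are used above because Go arrays
+// of comparable elements are themselves comparable, unlike slices. That is
+// what allows two Distinct values to be compared with ==. A minimal sketch,
+// assuming this package's String helper:
+//
+//	a := [2]KeyValue{String("a", "1"), String("b", "2")}
+//	b := [2]KeyValue{String("a", "1"), String("b", "2")}
+//	// a == b is true; two []KeyValue slices could not be compared this way.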
+
+// computeDistinctReflect computes a Distinct using reflection; it works for
+// input of any size.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+	for i, keyValue := range kvs {
+		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+	}
+	return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the Set.
+func (l *Set) MarshalJSON() ([]byte, error) {
+	return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
+func (l Set) MarshalLog() interface{} {
+	kvs := make(map[string]string)
+	for _, kv := range l.ToSlice() {
+		kvs[string(kv.Key)] = kv.Value.Emit()
+	}
+	return kvs
+}
+
+// Len implements sort.Interface.
+func (l *Sortable) Len() int {
+	return len(*l)
+}
+
+// Swap implements sort.Interface.
+func (l *Sortable) Swap(i, j int) {
+	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements sort.Interface.
+func (l *Sortable) Less(i, j int) bool {
+	return (*l)[i].Key < (*l)[j].Key
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 000000000..e584b2477
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package attribute
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[INVALID-0]
+	_ = x[BOOL-1]
+	_ = x[INT64-2]
+	_ = x[FLOAT64-3]
+	_ = x[STRING-4]
+	_ = x[BOOLSLICE-5]
+	_ = x[INT64SLICE-6]
+	_ = x[FLOAT64SLICE-7]
+	_ = x[STRINGSLICE-8]
+}
+
+const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 000000000..cb21dd5c0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,270 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"go.opentelemetry.io/otel/internal"
+	"go.opentelemetry.io/otel/internal/attribute"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int // nolint: revive  // redefines builtin Type.
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+	vtype    Type
+	numeric  uint64
+	stringly string
+	slice    interface{}
+}
+
+const (
+	// INVALID is used for a Value with no value set.
+	INVALID Type = iota
+	// BOOL is a boolean Type Value.
+	BOOL
+	// INT64 is a 64-bit signed integral Type Value.
+	INT64
+	// FLOAT64 is a 64-bit floating point Type Value.
+	FLOAT64
+	// STRING is a string Type Value.
+	STRING
+	// BOOLSLICE is a slice of booleans Type Value.
+	BOOLSLICE
+	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+	INT64SLICE
+	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+	FLOAT64SLICE
+	// STRINGSLICE is a slice of strings Type Value.
+	STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+	return Value{
+		vtype:   BOOL,
+		numeric: internal.BoolToRaw(v),
+	}
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+	return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+	return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value.
+func IntSliceValue(v []int) Value {
+	var int64Val int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+	for i, val := range v {
+		cp.Elem().Index(i).SetInt(int64(val))
+	}
+	return Value{
+		vtype: INT64SLICE,
+		slice: cp.Elem().Interface(),
+	}
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+	return Value{
+		vtype:   INT64,
+		numeric: internal.Int64ToRaw(v),
+	}
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+	return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+	return Value{
+		vtype:   FLOAT64,
+		numeric: internal.Float64ToRaw(v),
+	}
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+	return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+	return Value{
+		vtype:    STRING,
+		stringly: v,
+	}
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+	return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
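+
+// For illustration only: a minimal sketch of constructing Values with the
+// helpers above and reading them back as ordinary Go types:
+//
+//	v := StringSliceValue([]string{"a", "b"})
+//	_ = v.Type()               // STRINGSLICE
+//	_ = v.AsStringSlice()      // []string{"a", "b"}
+//	_ = BoolValue(true).Emit() // "true"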
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+	return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+	return internal.RawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+	if v.vtype != BOOLSLICE {
+		return nil
+	}
+	return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+	return attribute.AsBoolSlice(v.slice)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+	return internal.RawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+	if v.vtype != INT64SLICE {
+		return nil
+	}
+	return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+	return attribute.AsInt64Slice(v.slice)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+	return internal.RawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+	if v.vtype != FLOAT64SLICE {
+		return nil
+	}
+	return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+	return attribute.AsFloat64Slice(v.slice)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+	return v.stringly
+}
+
+// AsStringSlice returns the []string value. Make sure that the Value's type is
+// STRINGSLICE.
+func (v Value) AsStringSlice() []string {
+	if v.vtype != STRINGSLICE {
+		return nil
+	}
+	return v.asStringSlice()
+}
+
+func (v Value) asStringSlice() []string {
+	return attribute.AsStringSlice(v.slice)
+}
+
+type unknownValueType struct{}
+
+// AsInterface returns Value's data as interface{}.
+func (v Value) AsInterface() interface{} {
+	switch v.Type() {
+	case BOOL:
+		return v.AsBool()
+	case BOOLSLICE:
+		return v.asBoolSlice()
+	case INT64:
+		return v.AsInt64()
+	case INT64SLICE:
+		return v.asInt64Slice()
+	case FLOAT64:
+		return v.AsFloat64()
+	case FLOAT64SLICE:
+		return v.asFloat64Slice()
+	case STRING:
+		return v.stringly
+	case STRINGSLICE:
+		return v.asStringSlice()
+	}
+	return unknownValueType{}
+}
+
+// Emit returns a string representation of Value's data.
+func (v Value) Emit() string {
+	switch v.Type() {
+	case BOOLSLICE:
+		return fmt.Sprint(v.asBoolSlice())
+	case BOOL:
+		return strconv.FormatBool(v.AsBool())
+	case INT64SLICE:
+		return fmt.Sprint(v.asInt64Slice())
+	case INT64:
+		return strconv.FormatInt(v.AsInt64(), 10)
+	case FLOAT64SLICE:
+		return fmt.Sprint(v.asFloat64Slice())
+	case FLOAT64:
+		return fmt.Sprint(v.AsFloat64())
+	case STRINGSLICE:
+		return fmt.Sprint(v.asStringSlice())
+	case STRING:
+		return v.stringly
+	default:
+		return "unknown"
+	}
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+	var jsonVal struct {
+		Type  string
+		Value interface{}
+	}
+	jsonVal.Type = v.Type().String()
+	jsonVal.Value = v.AsInterface()
+	return json.Marshal(jsonVal)
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 000000000..a36db8f8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,570 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+	maxMembers               = 180
+	maxBytesPerMembers       = 4096
+	maxBytesPerBaggageString = 8192
+
+	listDelimiter     = ","
+	keyValueDelimiter = "="
+	propertyDelimiter = ";"
+
+	keyDef      = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
+	valueDef    = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
+	keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
+)
+
+var (
+	keyRe      = regexp.MustCompile(`^` + keyDef + `$`)
+	valueRe    = regexp.MustCompile(`^` + valueDef + `$`)
+	propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
+)
+
+var (
+	errInvalidKey      = errors.New("invalid key")
+	errInvalidValue    = errors.New("invalid value")
+	errInvalidProperty = errors.New("invalid baggage list-member property")
+	errInvalidMember   = errors.New("invalid baggage list-member")
+	errMemberNumber    = errors.New("too many list-members in baggage-string")
+	errMemberBytes     = errors.New("list-member too large")
+	errBaggageBytes    = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+	key, value string
+
+	// hasValue indicates whether the property has a value, distinguishing a
+	// property without a value from one whose value is the empty string.
+	hasValue bool
+
+	// hasData indicates whether the created property contains data or not.
+	// Properties that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// If key is invalid, an error will be returned.
+func NewKeyProperty(key string) (Property, error) {
+	if !keyRe.MatchString(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key, hasData: true}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// If key or value are invalid, an error will be returned.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !keyRe.MatchString(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+	if !valueRe.MatchString(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+		hasData:  true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	match := propertyRe.FindStringSubmatch(property)
+	if len(match) != 4 {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	p := Property{hasData: true}
+	if match[1] != "" {
+		p.key = match[1]
+	} else {
+		p.key = match[2]
+		p.value = match[3]
+		p.hasValue = true
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !p.hasData {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
+	}
+
+	if !keyRe.MatchString(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if p.hasValue && !valueRe.MatchString(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// indicating whether the value is set, so an empty value can be
+// distinguished from an unset one.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a string compliant with the W3C Baggage
+// specification.
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments. The key will be
+// used directly while the value will be url decoded after validation. An error
+// is returned if the created Member would be invalid according to the W3C
+// Baggage specification.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	decodedValue, err := url.QueryUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	m.value = decodedValue
+	return m, nil
+}
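+
+// For illustration only: NewMember validates the arguments first and then
+// URL-decodes the value, so percent-encoded input is stored decoded. A
+// minimal sketch:
+//
+//	m, err := NewMember("userId", "alice%20smith")
+//	// err == nil; m.Value() == "alice smith"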
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var (
+		key, value string
+		props      properties
+	)
+
+	parts := strings.SplitN(member, propertyDelimiter, 2)
+	switch len(parts) {
+	case 2:
+		// Parse the member properties.
+		for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+		fallthrough
+	case 1:
+		// Parse the member key/value pair.
+
+		// Take into account that a value can contain equal signs (=).
+		kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
+		if len(kv) != 2 {
+			return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+		}
+		// "Leading and trailing whitespaces are allowed but MUST be trimmed
+		// when converting the header into a data structure."
+		key = strings.TrimSpace(kv[0])
+		var err error
+		value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
+		if err != nil {
+			return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
+		}
+		if !keyRe.MatchString(key) {
+			return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+		}
+		if !valueRe.MatchString(value) {
+			return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+		}
+	default:
+		// This should never happen unless a developer has changed the string
+		// splitting somehow. Panic instead of failing silently and allowing
+		// the bug to slip past the CI checks.
+		panic("failed to parse baggage member")
+	}
+
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// validate ensures m conforms to the W3C Baggage specification, returning an
+// error otherwise. A key is just an ASCII string, but a value must be
+// URL-encoded UTF-8.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !keyRe.MatchString(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	if !valueRe.MatchString(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a string compliant with the W3C Baggage
+// specification.
+func (m Member) String() string {
+	// A key is just an ASCII string, but a value is URL encoded UTF-8.
+	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
+	if len(m.properties) > 0 {
+		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
+	}
+	return s
+}
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+	list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if the result would be
+// a Baggage exceeding the limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+	if len(members) == 0 {
+		return Baggage{}, nil
+	}
+
+	b := make(baggage.List)
+	for _, m := range members {
+		if !m.hasData {
+			return Baggage{}, errInvalidMember
+		}
+
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// Check member numbers after deduplication.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	bag := Baggage{b}
+	if n := len(bag.String()); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+	if bStr == "" {
+		return Baggage{}, nil
+	}
+
+	if n := len(bStr); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	b := make(baggage.List)
+	for _, memberStr := range strings.Split(bStr, listDelimiter) {
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return Baggage{}, err
+		}
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// OpenTelemetry does not allow for duplicate list-members, but the W3C
+	// specification does. Now that we have deduplicated, ensure the baggage
+	// does not exceed list-member limits.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
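+
+// For illustration only: a minimal sketch of the last-one-wins behavior
+// documented above:
+//
+//	b, _ := Parse("key1=a,key2=b,key1=c")
+//	// b.Member("key1").Value() == "c" and b.Len() == 2.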
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key, the returned Member
+// will be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to worry about distinguishing between the situation
+		// where a zero-valued Member is included in the Baggage because a
+		// zero-valued Member is invalid according to the W3C Baggage
+		// specification (it has an empty key).
+		return newInvalidMember()
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+		hasData:    true,
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members does not have significance.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+			hasData:    true,
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
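+
+// For illustration only: Baggage is treated as immutable, so SetMember
+// returns a new Baggage and leaves the receiver unchanged. A minimal sketch,
+// given an existing Baggage b:
+//
+//	m, _ := NewMember("k", "v")
+//	b2, _ := b.SetMember(m)
+//	// b is unchanged; b2 contains the list-member k=v.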
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+	n := len(b.list)
+	if _, ok := b.list[key]; ok {
+		n--
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		if k == key {
+			continue
+		}
+		list[k] = v
+	}
+
+	return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+	return len(b.list)
+}
+
+// String encodes Baggage into a string compliant with the W3C Baggage
+// specification. The returned string will be invalid if the Baggage contains
+// any invalid list-members.
+func (b Baggage) String() string {
+	members := make([]string, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		}.String())
+	}
+	return strings.Join(members, listDelimiter)
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 000000000..24b34b756
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+// ContextWithBaggage returns a copy of parent with baggage.
+func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return Baggage{list: baggage.ListFromContext(ctx)}
+}
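+
+// For illustration only: a minimal sketch of the typical round trip through
+// a context:
+//
+//	ctx = ContextWithBaggage(ctx, bag)
+//	// ... later, possibly in another function:
+//	bag = FromContext(ctx)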
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 000000000..4545100df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 000000000..587ebae4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain
+	// no error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+	Unset: "Unset",
+	Error: "Error",
+	Ok:    "Ok",
+}
+
+var strToCode = map[string]Code{
+	`"Unset"`: Unset,
+	`"Error"`: Error,
+	`"Ok"`:    Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+	return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	var x interface{}
+	if err := json.Unmarshal(b, &x); err != nil {
+		return err
+	}
+	switch x.(type) {
+	case string:
+		if jc, ok := strToCode[string(b)]; ok {
+			*c = jc
+			return nil
+		}
+		return fmt.Errorf("invalid code: %q", string(b))
+	case float64:
+		if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+			if ci >= maxCode {
+				return fmt.Errorf("invalid code: %q", ci)
+			}
+
+			*c = Code(ci)
+			return nil
+		}
+		return fmt.Errorf("invalid code: %q", string(b))
+	default:
+		return fmt.Errorf("invalid code: %q", string(b))
+	}
+}
+
+// MarshalJSON returns c as the JSON encoding of c.
+func (c *Code) MarshalJSON() ([]byte, error) {
+	if c == nil {
+		return []byte("null"), nil
+	}
+	str, ok := codeToStr[*c]
+	if !ok {
+		return nil, fmt.Errorf("invalid code: %d", *c)
+	}
+	return []byte(fmt.Sprintf("%q", str)), nil
+}
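+
+// For illustration only: UnmarshalJSON accepts both the string and the
+// numeric JSON form. A minimal sketch:
+//
+//	var c Code
+//	_ = c.UnmarshalJSON([]byte(`"Ok"`)) // c == Ok
+//	_ = c.UnmarshalJSON([]byte("1"))    // c == Error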
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
new file mode 100644
index 000000000..df3e0f1b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package codes defines the canonical error codes used by OpenTelemetry.
+
+It conforms to [the OpenTelemetry
+specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
+*/
+package codes // import "go.opentelemetry.io/otel/codes"
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
new file mode 100644
index 000000000..daa36c89d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otel provides global access to the OpenTelemetry API. The subpackages of
+the otel package provide an implementation of the OpenTelemetry API.
+
+The provided API is used to instrument code and measure data about that code's
+performance and operation. The measured data, by default, is not processed or
+transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
+default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
+exporters are used to process and transport this data.
+
+To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
+
+To read more about tracing, see go.opentelemetry.io/otel/trace.
+
+To read more about metrics, see go.opentelemetry.io/otel/metric.
+
+To read more about propagation, see go.opentelemetry.io/otel/propagation and
+go.opentelemetry.io/otel/baggage.
+*/
+package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
new file mode 100644
index 000000000..72fad8541
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/error_handler.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
+	Handle(error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// ErrorHandlerFunc is a convenience adapter to allow the use of a function
+// as an ErrorHandler.
+type ErrorHandlerFunc func(error)
+
+var _ ErrorHandler = ErrorHandlerFunc(nil)
+
+// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
+func (f ErrorHandlerFunc) Handle(err error) {
+	f(err)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md
new file mode 100644
index 000000000..598c569a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md
@@ -0,0 +1,50 @@
+# OpenTelemetry-Go Jaeger Exporter
+
+[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger)
+
+[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md) implementation.
+
+## Installation
+
+```
+go get -u go.opentelemetry.io/otel/exporters/jaeger
+```
+
+## Example
+
+See [../../example/jaeger](../../example/jaeger).
+
+## Configuration
+
+The exporter can be used to send spans to:
+
+- Jaeger agent, using `jaeger.thrift` over the compact Thrift protocol, via the
+  [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option.
+- Jaeger collector, using `jaeger.thrift` over HTTP, via the
+  [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option.
+
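+As a minimal sketch (using the default collector endpoint listed in the table
+below), a collector-based exporter can be constructed like this; `sdktrace`
+refers to `go.opentelemetry.io/otel/sdk/trace`:
+
+```go
+import "go.opentelemetry.io/otel/exporters/jaeger"
+
+exp, err := jaeger.New(
+	jaeger.WithCollectorEndpoint(jaeger.WithEndpoint("http://localhost:14268/api/traces")),
+)
+if err != nil {
+	// handle error
+}
+// Pass exp to a tracer provider, e.g. sdktrace.WithBatcher(exp).
+```
+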
+### Environment Variables
+
+The following environment variables can be used
+(instead of options objects) to override the default configuration.
+
+| Environment variable              | Option                                                                                        | Default value                       |
+| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- |
+| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost`                         |
+| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831`                              |
+| `OTEL_EXPORTER_JAEGER_ENDPOINT`   | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint)   | `http://localhost:14268/api/traces` |
+| `OTEL_EXPORTER_JAEGER_USER`       | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername)   |                                     |
+| `OTEL_EXPORTER_JAEGER_PASSWORD`   | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword)   |                                     |
+
+Configuration using options takes precedence over the environment variables.
+
+## Contributing
+
+This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path.
+When re-generating Thrift code in the future, please adapt import paths as necessary.
+
+## References
+
+- [Jaeger](https://www.jaegertracing.io/)
+- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md)
+- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
new file mode 100644
index 000000000..a050020bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go
@@ -0,0 +1,213 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/go-logr/logr"
+
+	genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent"
+	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+const (
+	// udpPacketMaxLength is the max size of a UDP packet we want to send, synced with jaeger-agent.
+	udpPacketMaxLength = 65000
+	// emitBatchOverhead is the additional overhead bytes used for enveloping the datagram,
+	// synced with jaeger-agent https://github.com/jaegertracing/jaeger-client-go/blob/master/transport_udp.go#L37
+	emitBatchOverhead = 70
+)
+
+// agentClientUDP is a UDP client to the Jaeger agent that implements the gen.Agent interface.
+type agentClientUDP struct {
+	genAgent.Agent
+	io.Closer
+
+	connUDP        udpConn
+	client         *genAgent.AgentClient
+	maxPacketSize  int                   // max size of datagram in bytes
+	thriftBuffer   *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+	thriftProtocol thrift.TProtocol
+}
+
+type udpConn interface {
+	Write([]byte) (int, error)
+	SetWriteBuffer(int) error
+	Close() error
+}
+
+type agentClientUDPParams struct {
+	Host                     string
+	Port                     string
+	MaxPacketSize            int
+	Logger                   logr.Logger
+	AttemptReconnecting      bool
+	AttemptReconnectInterval time.Duration
+}
+
+// newAgentClientUDP creates a client that sends spans to the Jaeger agent over UDP.
+func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) {
+	hostPort := net.JoinHostPort(params.Host, params.Port)
+	// validate hostport
+	if _, _, err := net.SplitHostPort(hostPort); err != nil {
+		return nil, err
+	}
+
+	if params.MaxPacketSize <= 0 || params.MaxPacketSize > udpPacketMaxLength {
+		params.MaxPacketSize = udpPacketMaxLength
+	}
+
+	if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 {
+		params.AttemptReconnectInterval = time.Second * 30
+	}
+
+	thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
+	protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{})
+	thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
+	client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory)
+
+	var connUDP udpConn
+	var err error
+
+	if params.AttemptReconnecting {
+		// host is a hostname; set up a resolver loop in case the host record changes during operation.
+		connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		destAddr, err := net.ResolveUDPAddr("udp", hostPort)
+		if err != nil {
+			return nil, err
+		}
+
+		connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
+		return nil, err
+	}
+
+	return &agentClientUDP{
+		connUDP:        connUDP,
+		client:         client,
+		maxPacketSize:  params.MaxPacketSize,
+		thriftBuffer:   thriftBuffer,
+		thriftProtocol: thriftProtocol,
+	}, nil
+}
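+
+// A minimal construction sketch (host and port values are illustrative):
+//
+//	client, err := newAgentClientUDP(agentClientUDPParams{
+//		Host: "localhost",
+//		Port: "6831",
+//	})
+//	if err != nil { /* handle */ }
+//	defer client.Close()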
+
+// EmitBatch splits the batch into chunks that fit into UDP packets and sends the data to the agent.
+func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error {
+	var errs []error
+	processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process)
+	if err != nil {
+		// drop the batch if serialization of process fails.
+		return err
+	}
+
+	maxPacketSize := a.maxPacketSize
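+	// Leave room for the batch envelope (emitBatchOverhead) so the payload still fits in one datagram.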
+	if maxPacketSize > udpPacketMaxLength-emitBatchOverhead {
+		maxPacketSize = udpPacketMaxLength - emitBatchOverhead
+	}
+	totalSize := processSize
+	var spans []*gen.Span
+	for _, span := range batch.Spans {
+		spanSize, err := a.calcSizeOfSerializedThrift(ctx, span)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span))
+			continue
+		}
+		if spanSize+processSize >= maxPacketSize {
+			// drop the span that exceeds the limit.
+			errs = append(errs, fmt.Errorf("span too large to send: %v", span))
+			continue
+		}
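+		// Adding this span would overflow the current packet: flush the accumulated spans first.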
+		if totalSize+spanSize >= maxPacketSize {
+			if err := a.flush(ctx, &gen.Batch{
+				Process: batch.Process,
+				Spans:   spans,
+			}); err != nil {
+				errs = append(errs, err)
+			}
+			spans = spans[:0]
+			totalSize = processSize
+		}
+		totalSize += spanSize
+		spans = append(spans, span)
+	}
+
+	if len(spans) > 0 {
+		if err := a.flush(ctx, &gen.Batch{
+			Process: batch.Process,
+			Spans:   spans,
+		}); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) == 1 {
+		return errs[0]
+	} else if len(errs) > 1 {
+		joined := a.makeJoinedErrorString(errs)
+		return fmt.Errorf("multiple errors during transform: %s", joined)
+	}
+	return nil
+}
+
+// makeJoinedErrorString joins all the errors into one error message.
+func (a *agentClientUDP) makeJoinedErrorString(errs []error) string {
+	var errMsgs []string
+	for _, err := range errs {
+		errMsgs = append(errMsgs, err.Error())
+	}
+	return strings.Join(errMsgs, ", ")
+}
+
+// flush will send the batch of spans to the agent.
+func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error {
+	a.thriftBuffer.Reset()
+	if err := a.client.EmitBatch(ctx, batch); err != nil {
+		return err
+	}
+	if a.thriftBuffer.Len() > a.maxPacketSize {
+		return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
+			a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+	}
+	_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+	return err
+}
+
+// calcSizeOfSerializedThrift calculates the serialized thrift packet size.
+func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) {
+	a.thriftBuffer.Reset()
+	err := thriftStruct.Write(ctx, a.thriftProtocol)
+	return a.thriftBuffer.Len(), err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *agentClientUDP) Close() error {
+	return a.connUDP.Close()
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
new file mode 100644
index 000000000..0d7ba8676
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger.
+package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go
new file mode 100644
index 000000000..a7253e483
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
+
+import (
+	"os"
+)
+
+// Environment variable names.
+const (
+	// Hostname for the Jaeger agent, part of the address where the exporter sends spans,
+	// e.g. "localhost".
+	envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
+	// Port for the Jaeger agent, part of the address where the exporter sends spans,
+	// e.g. 6831.
+	envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
+	// The HTTP endpoint for sending spans directly to a collector,
+	// e.g. http://jaeger-collector:14268/api/traces.
+	envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT"
+	// Username to send as part of "Basic" authentication to the collector endpoint.
+	envUser = "OTEL_EXPORTER_JAEGER_USER"
+	// Password to send as part of "Basic" authentication to the collector endpoint.
+	envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD"
+)
+
+// envOr returns an env variable's value if it exists, or the default if not.
+func envOr(key, defaultValue string) string {
+	if v, ok := os.LookupEnv(key); ok && v != "" {
+		return v
+	}
+	return defaultValue
+}
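+
+// For example (hypothetical caller), resolving the collector endpoint with its
+// documented default might look like:
+//
+//	endpoint := envOr(envEndpoint, "http://localhost:14268/api/traces")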
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
new file mode 100644
index 000000000..54cd3b086
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+var GoUnusedProtection__ int
+
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go
new file mode 100644
index 000000000..3b96e3222
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go
@@ -0,0 +1,27 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+func init() {
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go
new file mode 100644
index 000000000..c7c8e9ca3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go
@@ -0,0 +1,412 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+type Agent interface {
+	// Parameters:
+	//  - Spans
+	EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
+	// Parameters:
+	//  - Batch
+	EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
+}
+
+type AgentClient struct {
+	c    thrift.TClient
+	meta thrift.ResponseMeta
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+	return &AgentClient{
+		c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+	}
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+	return &AgentClient{
+		c: thrift.NewTStandardClient(iprot, oprot),
+	}
+}
+
+func NewAgentClient(c thrift.TClient) *AgentClient {
+	return &AgentClient{
+		c: c,
+	}
+}
+
+func (p *AgentClient) Client_() thrift.TClient {
+	return p.c
+}
+
+func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
+	return p.meta
+}
+
+func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+	p.meta = meta
+}
+
+// Parameters:
+//  - Spans
+func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
+	var _args0 AgentEmitZipkinBatchArgs
+	_args0.Spans = spans
+	p.SetLastResponseMeta_(thrift.ResponseMeta{})
+	if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Parameters:
+//  - Batch
+func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
+	var _args1 AgentEmitBatchArgs
+	_args1.Batch = batch
+	p.SetLastResponseMeta_(thrift.ResponseMeta{})
+	if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+type AgentProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+	self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
+	self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+	return self2
+}
+
+func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+	if err2 != nil {
+		return false, thrift.WrapTException(err2)
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(ctx, seqId, iprot, oprot)
+	}
+	iprot.Skip(ctx, thrift.STRUCT)
+	iprot.ReadMessageEnd(ctx)
+	x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+	x3.Write(ctx, oprot)
+	oprot.WriteMessageEnd(ctx)
+	oprot.Flush(ctx)
+	return false, x3
+
+}
+
+type agentProcessorEmitZipkinBatch struct {
+	handler Agent
+}
+
+func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := AgentEmitZipkinBatchArgs{}
+	var err2 error
+	if err2 = args.Read(ctx, iprot); err2 != nil {
+		iprot.ReadMessageEnd(ctx)
+		return false, thrift.WrapTException(err2)
+	}
+	iprot.ReadMessageEnd(ctx)
+
+	tickerCancel := func() {}
+	_ = tickerCancel
+
+	if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
+		tickerCancel()
+		return true, thrift.WrapTException(err2)
+	}
+	tickerCancel()
+	return true, nil
+}
+
+type agentProcessorEmitBatch struct {
+	handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := AgentEmitBatchArgs{}
+	var err2 error
+	if err2 = args.Read(ctx, iprot); err2 != nil {
+		iprot.ReadMessageEnd(ctx)
+		return false, thrift.WrapTException(err2)
+	}
+	iprot.ReadMessageEnd(ctx)
+
+	tickerCancel := func() {}
+	_ = tickerCancel
+
+	if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
+		tickerCancel()
+		return true, thrift.WrapTException(err2)
+	}
+	tickerCancel()
+	return true, nil
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type AgentEmitZipkinBatchArgs struct {
+	Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
+	return &AgentEmitZipkinBatchArgs{}
+}
+
+func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
+	return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*zipkincore.Span, 0, size)
+	p.Spans = tSlice
+	for i := 0; i < size; i++ {
+		_elem4 := &zipkincore.Span{}
+		if err := _elem4.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+		}
+		p.Spans = append(p.Spans, _elem4)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Spans {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+	}
+	return err
+}
+
+func (p *AgentEmitZipkinBatchArgs) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Batch
+type AgentEmitBatchArgs struct {
+	Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+	return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
+
+func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
+	if !p.IsSetBatch() {
+		return AgentEmitBatchArgs_Batch_DEFAULT
+	}
+	return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+	return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.STRUCT {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	p.Batch = &jaeger.Batch{}
+	if err := p.Batch.Read(ctx, iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
+	}
+	if err := p.Batch.Write(ctx, oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
+	}
+	return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
new file mode 100644
index 000000000..fe45a9f9a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+var GoUnusedProtection__ int
+
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
new file mode 100644
index 000000000..10162857f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
@@ -0,0 +1,22 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+func init() {
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
new file mode 100644
index 000000000..b1fe26c57
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
@@ -0,0 +1,3022 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import (
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type TagType int64
+
+const (
+	TagType_STRING TagType = 0
+	TagType_DOUBLE TagType = 1
+	TagType_BOOL   TagType = 2
+	TagType_LONG   TagType = 3
+	TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+	switch p {
+	case TagType_STRING:
+		return "STRING"
+	case TagType_DOUBLE:
+		return "DOUBLE"
+	case TagType_BOOL:
+		return "BOOL"
+	case TagType_LONG:
+		return "LONG"
+	case TagType_BINARY:
+		return "BINARY"
+	}
+	return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+	switch s {
+	case "STRING":
+		return TagType_STRING, nil
+	case "DOUBLE":
+		return TagType_DOUBLE, nil
+	case "BOOL":
+		return TagType_BOOL, nil
+	case "LONG":
+		return TagType_LONG, nil
+	case "BINARY":
+		return TagType_BINARY, nil
+	}
+	return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+	q, err := TagTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+func (p *TagType) Scan(value interface{}) error {
+	v, ok := value.(int64)
+	if !ok {
+		return errors.New("Scan value is not int64")
+	}
+	*p = TagType(v)
+	return nil
+}
+
+func (p *TagType) Value() (driver.Value, error) {
+	if p == nil {
+		return nil, nil
+	}
+	return int64(*p), nil
+}
+
+type SpanRefType int64
+
+const (
+	SpanRefType_CHILD_OF     SpanRefType = 0
+	SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+	switch p {
+	case SpanRefType_CHILD_OF:
+		return "CHILD_OF"
+	case SpanRefType_FOLLOWS_FROM:
+		return "FOLLOWS_FROM"
+	}
+	return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+	switch s {
+	case "CHILD_OF":
+		return SpanRefType_CHILD_OF, nil
+	case "FOLLOWS_FROM":
+		return SpanRefType_FOLLOWS_FROM, nil
+	}
+	return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+	q, err := SpanRefTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+func (p *SpanRefType) Scan(value interface{}) error {
+	v, ok := value.(int64)
+	if !ok {
+		return errors.New("Scan value is not int64")
+	}
+	*p = SpanRefType(v)
+	return nil
+}
+
+func (p *SpanRefType) Value() (driver.Value, error) {
+	if p == nil {
+		return nil, nil
+	}
+	return int64(*p), nil
+}
+
+// Attributes:
+//  - Key
+//  - VType
+//  - VStr
+//  - VDouble
+//  - VBool
+//  - VLong
+//  - VBinary
+type Tag struct {
+	Key     string   `thrift:"key,1,required" db:"key" json:"key"`
+	VType   TagType  `thrift:"vType,2,required" db:"vType" json:"vType"`
+	VStr    *string  `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
+	VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
+	VBool   *bool    `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
+	VLong   *int64   `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
+	VBinary []byte   `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+	return &Tag{}
+}
+
+func (p *Tag) GetKey() string {
+	return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+	return p.VType
+}
+
+var Tag_VStr_DEFAULT string
+
+func (p *Tag) GetVStr() string {
+	if !p.IsSetVStr() {
+		return Tag_VStr_DEFAULT
+	}
+	return *p.VStr
+}
+
+var Tag_VDouble_DEFAULT float64
+
+func (p *Tag) GetVDouble() float64 {
+	if !p.IsSetVDouble() {
+		return Tag_VDouble_DEFAULT
+	}
+	return *p.VDouble
+}
+
+var Tag_VBool_DEFAULT bool
+
+func (p *Tag) GetVBool() bool {
+	if !p.IsSetVBool() {
+		return Tag_VBool_DEFAULT
+	}
+	return *p.VBool
+}
+
+var Tag_VLong_DEFAULT int64
+
+func (p *Tag) GetVLong() int64 {
+	if !p.IsSetVLong() {
+		return Tag_VLong_DEFAULT
+	}
+	return *p.VLong
+}
+
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+	return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+	return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+	return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+	return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+	return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+	return p.VBinary != nil
+}
+
+func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetKey bool = false
+	var issetVType bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetKey = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.I32 {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+				issetVType = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.DOUBLE {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 5:
+			if fieldTypeId == thrift.BOOL {
+				if err := p.ReadField5(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 6:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField6(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 7:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField7(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetKey {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
+	}
+	if !issetVType {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
+	}
+	return nil
+}
+
+func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Key = v
+	}
+	return nil
+}
+
+func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		temp := TagType(v)
+		p.VType = temp
+	}
+	return nil
+}
+
+func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.VStr = &v
+	}
+	return nil
+}
+
+func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadDouble(ctx); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.VDouble = &v
+	}
+	return nil
+}
+
+func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(ctx); err != nil {
+		return thrift.PrependError("error reading field 5: ", err)
+	} else {
+		p.VBool = &v
+	}
+	return nil
+}
+
+func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 6: ", err)
+	} else {
+		p.VLong = &v
+	}
+	return nil
+}
+
+func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBinary(ctx); err != nil {
+		return thrift.PrependError("error reading field 7: ", err)
+	} else {
+		p.VBinary = v
+	}
+	return nil
+}
+
+func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField5(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField6(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField7(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+	}
+	return err
+}
+
+func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
+	}
+	if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
+	}
+	return err
+}
+
+func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetVStr() {
+		if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
+		}
+		if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetVDouble() {
+		if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
+		}
+		if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetVBool() {
+		if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
+		}
+		if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetVLong() {
+		if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
+		}
+		if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetVBinary() {
+		if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
+		}
+		if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Tag) Equals(other *Tag) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Key != other.Key {
+		return false
+	}
+	if p.VType != other.VType {
+		return false
+	}
+	if p.VStr != other.VStr {
+		if p.VStr == nil || other.VStr == nil {
+			return false
+		}
+		if (*p.VStr) != (*other.VStr) {
+			return false
+		}
+	}
+	if p.VDouble != other.VDouble {
+		if p.VDouble == nil || other.VDouble == nil {
+			return false
+		}
+		if (*p.VDouble) != (*other.VDouble) {
+			return false
+		}
+	}
+	if p.VBool != other.VBool {
+		if p.VBool == nil || other.VBool == nil {
+			return false
+		}
+		if (*p.VBool) != (*other.VBool) {
+			return false
+		}
+	}
+	if p.VLong != other.VLong {
+		if p.VLong == nil || other.VLong == nil {
+			return false
+		}
+		if (*p.VLong) != (*other.VLong) {
+			return false
+		}
+	}
+	if bytes.Compare(p.VBinary, other.VBinary) != 0 {
+		return false
+	}
+	return true
+}
+
+func (p *Tag) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Tag(%+v)", *p)
+}
+
+// Attributes:
+//  - Timestamp
+//  - Fields
+type Log struct {
+	Timestamp int64  `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
+	Fields    []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
+}
+
+func NewLog() *Log {
+	return &Log{}
+}
+
+func (p *Log) GetTimestamp() int64 {
+	return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+	return p.Fields
+}
+func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetTimestamp bool = false
+	var issetFields bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetTimestamp = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+				issetFields = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetTimestamp {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
+	}
+	if !issetFields {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
+	}
+	return nil
+}
+
+func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Timestamp = v
+	}
+	return nil
+}
+
+func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Tag, 0, size)
+	p.Fields = tSlice
+	for i := 0; i < size; i++ {
+		_elem0 := &Tag{}
+		if err := _elem0.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+		}
+		p.Fields = append(p.Fields, _elem0)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+	}
+	return err
+}
+
+func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Fields {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
+	}
+	return err
+}
+
+func (p *Log) Equals(other *Log) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Timestamp != other.Timestamp {
+		return false
+	}
+	if len(p.Fields) != len(other.Fields) {
+		return false
+	}
+	for i, _tgt := range p.Fields {
+		_src1 := other.Fields[i]
+		if !_tgt.Equals(_src1) {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Log) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+//  - RefType
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+type SpanRef struct {
+	RefType     SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
+	TraceIdLow  int64       `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
+	TraceIdHigh int64       `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
+	SpanId      int64       `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+	return &SpanRef{}
+}
+
+func (p *SpanRef) GetRefType() SpanRefType {
+	return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+	return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+	return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+	return p.SpanId
+}
+func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetRefType bool = false
+	var issetTraceIdLow bool = false
+	var issetTraceIdHigh bool = false
+	var issetSpanId bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I32 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetRefType = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+				issetTraceIdLow = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+				issetTraceIdHigh = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+				issetSpanId = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetRefType {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
+	}
+	if !issetTraceIdLow {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+	}
+	if !issetTraceIdHigh {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+	}
+	if !issetSpanId {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+	}
+	return nil
+}
+
+func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		temp := SpanRefType(v)
+		p.RefType = temp
+	}
+	return nil
+}
+
+func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.TraceIdLow = v
+	}
+	return nil
+}
+
+func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.TraceIdHigh = v
+	}
+	return nil
+}
+
+func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.SpanId = v
+	}
+	return nil
+}
+
+func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
+	}
+	if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
+	}
+	return err
+}
+
+func (p *SpanRef) Equals(other *SpanRef) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.RefType != other.RefType {
+		return false
+	}
+	if p.TraceIdLow != other.TraceIdLow {
+		return false
+	}
+	if p.TraceIdHigh != other.TraceIdHigh {
+		return false
+	}
+	if p.SpanId != other.SpanId {
+		return false
+	}
+	return true
+}
+
+func (p *SpanRef) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
+// Attributes:
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+//  - ParentSpanId
+//  - OperationName
+//  - References
+//  - Flags
+//  - StartTime
+//  - Duration
+//  - Tags
+//  - Logs
+type Span struct {
+	TraceIdLow    int64      `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
+	TraceIdHigh   int64      `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
+	SpanId        int64      `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
+	ParentSpanId  int64      `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
+	OperationName string     `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
+	References    []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
+	Flags         int32      `thrift:"flags,7,required" db:"flags" json:"flags"`
+	StartTime     int64      `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
+	Duration      int64      `thrift:"duration,9,required" db:"duration" json:"duration"`
+	Tags          []*Tag     `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
+	Logs          []*Log     `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+	return &Span{}
+}
+
+func (p *Span) GetTraceIdLow() int64 {
+	return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+	return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+	return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+	return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+	return p.OperationName
+}
+
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+	return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+	return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+	return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+	return p.Duration
+}
+
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+	return p.Tags
+}
+
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+	return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+	return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+	return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+	return p.Logs != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetTraceIdLow bool = false
+	var issetTraceIdHigh bool = false
+	var issetSpanId bool = false
+	var issetParentSpanId bool = false
+	var issetOperationName bool = false
+	var issetFlags bool = false
+	var issetStartTime bool = false
+	var issetDuration bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetTraceIdLow = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+				issetTraceIdHigh = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+				issetSpanId = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+				issetParentSpanId = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 5:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField5(ctx, iprot); err != nil {
+					return err
+				}
+				issetOperationName = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 6:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField6(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 7:
+			if fieldTypeId == thrift.I32 {
+				if err := p.ReadField7(ctx, iprot); err != nil {
+					return err
+				}
+				issetFlags = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 8:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField8(ctx, iprot); err != nil {
+					return err
+				}
+				issetStartTime = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 9:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField9(ctx, iprot); err != nil {
+					return err
+				}
+				issetDuration = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 10:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField10(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 11:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField11(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetTraceIdLow {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+	}
+	if !issetTraceIdHigh {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+	}
+	if !issetSpanId {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+	}
+	if !issetParentSpanId {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
+	}
+	if !issetOperationName {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
+	}
+	if !issetFlags {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
+	}
+	if !issetStartTime {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
+	}
+	if !issetDuration {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
+	}
+	return nil
+}
+
+func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.TraceIdLow = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.TraceIdHigh = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.SpanId = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.ParentSpanId = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 5: ", err)
+	} else {
+		p.OperationName = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*SpanRef, 0, size)
+	p.References = tSlice
+	for i := 0; i < size; i++ {
+		_elem2 := &SpanRef{}
+		if err := _elem2.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+		}
+		p.References = append(p.References, _elem2)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(ctx); err != nil {
+		return thrift.PrependError("error reading field 7: ", err)
+	} else {
+		p.Flags = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 8: ", err)
+	} else {
+		p.StartTime = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 9: ", err)
+	} else {
+		p.Duration = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Tag, 0, size)
+	p.Tags = tSlice
+	for i := 0; i < size; i++ {
+		_elem3 := &Tag{}
+		if err := _elem3.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+		}
+		p.Tags = append(p.Tags, _elem3)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Log, 0, size)
+	p.Logs = tSlice
+	for i := 0; i < size; i++ {
+		_elem4 := &Log{}
+		if err := _elem4.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+		}
+		p.Logs = append(p.Logs, _elem4)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
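+// Write serializes the Span field by field in id order and closes the
+// struct with a field-stop marker, the same STOP-terminated layout that
+// Read consumes. The optional list fields are delegated to writers that
+// skip the field entirely when it is unset.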
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField5(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField6(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField7(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField8(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField9(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField10(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField11(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
+	}
+	return err
+}
+
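+// writeField6 is representative of the optional-field writers: when the
+// slice is nil, no field header is written at all, so an absent list and
+// an empty (non-nil) list are distinguishable on the wire.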
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetReferences() {
+		if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
+		}
+		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.References {
+			if err := v.Write(ctx, oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(ctx); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
+	}
+	if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetTags() {
+		if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
+		}
+		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Tags {
+			if err := v.Write(ctx, oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(ctx); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetLogs() {
+		if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
+		}
+		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Logs {
+			if err := v.Write(ctx, oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(ctx); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
+		}
+	}
+	return err
+}
+
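+// Equals reports deep equality: scalar fields are compared directly and
+// the reference, tag, and log lists element-wise through each element's
+// own Equals method.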
+func (p *Span) Equals(other *Span) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.TraceIdLow != other.TraceIdLow {
+		return false
+	}
+	if p.TraceIdHigh != other.TraceIdHigh {
+		return false
+	}
+	if p.SpanId != other.SpanId {
+		return false
+	}
+	if p.ParentSpanId != other.ParentSpanId {
+		return false
+	}
+	if p.OperationName != other.OperationName {
+		return false
+	}
+	if len(p.References) != len(other.References) {
+		return false
+	}
+	for i, _tgt := range p.References {
+		_src5 := other.References[i]
+		if !_tgt.Equals(_src5) {
+			return false
+		}
+	}
+	if p.Flags != other.Flags {
+		return false
+	}
+	if p.StartTime != other.StartTime {
+		return false
+	}
+	if p.Duration != other.Duration {
+		return false
+	}
+	if len(p.Tags) != len(other.Tags) {
+		return false
+	}
+	for i, _tgt := range p.Tags {
+		_src6 := other.Tags[i]
+		if !_tgt.Equals(_src6) {
+			return false
+		}
+	}
+	if len(p.Logs) != len(other.Logs) {
+		return false
+	}
+	for i, _tgt := range p.Logs {
+		_src7 := other.Logs[i]
+		if !_tgt.Equals(_src7) {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Span) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+//  - ServiceName
+//  - Tags
+type Process struct {
+	ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
+	Tags        []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+	return &Process{}
+}
+
+func (p *Process) GetServiceName() string {
+	return p.ServiceName
+}
+
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+	return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+	return p.Tags != nil
+}
+
+func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetServiceName bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetServiceName = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetServiceName {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
+	}
+	return nil
+}
+
+func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.ServiceName = v
+	}
+	return nil
+}
+
+func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Tag, 0, size)
+	p.Tags = tSlice
+	for i := 0; i < size; i++ {
+		_elem8 := &Tag{}
+		if err := _elem8.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
+		}
+		p.Tags = append(p.Tags, _elem8)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
+	}
+	return err
+}
+
+func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetTags() {
+		if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
+		}
+		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Tags {
+			if err := v.Write(ctx, oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(ctx); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Process) Equals(other *Process) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.ServiceName != other.ServiceName {
+		return false
+	}
+	if len(p.Tags) != len(other.Tags) {
+		return false
+	}
+	for i, _tgt := range p.Tags {
+		_src9 := other.Tags[i]
+		if !_tgt.Equals(_src9) {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Process) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Process(%+v)", *p)
+}
+
+// Attributes:
+//  - FullQueueDroppedSpans
+//  - TooLargeDroppedSpans
+//  - FailedToEmitSpans
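+//
+// ClientStats reports spans the client dropped before emission (queue
+// full, batch too large) or otherwise failed to emit; all three counters
+// are required fields.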
+type ClientStats struct {
+	FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
+	TooLargeDroppedSpans  int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
+	FailedToEmitSpans     int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+	return &ClientStats{}
+}
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+	return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+	return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+	return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetFullQueueDroppedSpans bool = false
+	var issetTooLargeDroppedSpans bool = false
+	var issetFailedToEmitSpans bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetFullQueueDroppedSpans = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+				issetTooLargeDroppedSpans = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+				issetFailedToEmitSpans = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetFullQueueDroppedSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
+	}
+	if !issetTooLargeDroppedSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
+	}
+	if !issetFailedToEmitSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
+	}
+	return nil
+}
+
+func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.FullQueueDroppedSpans = v
+	}
+	return nil
+}
+
+func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.TooLargeDroppedSpans = v
+	}
+	return nil
+}
+
+func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.FailedToEmitSpans = v
+	}
+	return nil
+}
+
+func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
+	}
+	return err
+}
+
+func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
+	}
+	return err
+}
+
+func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
+	}
+	return err
+}
+
+func (p *ClientStats) Equals(other *ClientStats) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans {
+		return false
+	}
+	if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans {
+		return false
+	}
+	if p.FailedToEmitSpans != other.FailedToEmitSpans {
+		return false
+	}
+	return true
+}
+
+func (p *ClientStats) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
+// Attributes:
+//  - Process
+//  - Spans
+//  - SeqNo
+//  - Stats
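+//
+// Process and Spans are required; SeqNo and Stats are optional. The
+// optional scalar SeqNo is generated as *int64 so that nil can stand for
+// "unset", which the IsSet* accessors below test.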
+type Batch struct {
+	Process *Process     `thrift:"process,1,required" db:"process" json:"process"`
+	Spans   []*Span      `thrift:"spans,2,required" db:"spans" json:"spans"`
+	SeqNo   *int64       `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
+	Stats   *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
+}
+
+func NewBatch() *Batch {
+	return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+
+func (p *Batch) GetProcess() *Process {
+	if !p.IsSetProcess() {
+		return Batch_Process_DEFAULT
+	}
+	return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+	return p.Spans
+}
+
+var Batch_SeqNo_DEFAULT int64
+
+func (p *Batch) GetSeqNo() int64 {
+	if !p.IsSetSeqNo() {
+		return Batch_SeqNo_DEFAULT
+	}
+	return *p.SeqNo
+}
+
+var Batch_Stats_DEFAULT *ClientStats
+
+func (p *Batch) GetStats() *ClientStats {
+	if !p.IsSetStats() {
+		return Batch_Stats_DEFAULT
+	}
+	return p.Stats
+}
+func (p *Batch) IsSetProcess() bool {
+	return p.Process != nil
+}
+
+func (p *Batch) IsSetSeqNo() bool {
+	return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+	return p.Stats != nil
+}
+
+func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetProcess bool = false
+	var issetSpans bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.STRUCT {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetProcess = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+				issetSpans = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.STRUCT {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetProcess {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
+	}
+	if !issetSpans {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
+	}
+	return nil
+}
+
+func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	p.Process = &Process{}
+	if err := p.Process.Read(ctx, iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+	}
+	return nil
+}
+
+func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Span, 0, size)
+	p.Spans = tSlice
+	for i := 0; i < size; i++ {
+		_elem10 := &Span{}
+		if err := _elem10.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+		}
+		p.Spans = append(p.Spans, _elem10)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.SeqNo = &v
+	}
+	return nil
+}
+
+func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	p.Stats = &ClientStats{}
+	if err := p.Stats.Read(ctx, iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+	}
+	return nil
+}
+
+func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
+	}
+	if err := p.Process.Write(ctx, oprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
+	}
+	return err
+}
+
+func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Spans {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
+	}
+	return err
+}
+
+func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetSeqNo() {
+		if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
+		}
+		if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetStats() {
+		if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
+		}
+		if err := p.Stats.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Batch) Equals(other *Batch) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if !p.Process.Equals(other.Process) {
+		return false
+	}
+	if len(p.Spans) != len(other.Spans) {
+		return false
+	}
+	for i, _tgt := range p.Spans {
+		_src11 := other.Spans[i]
+		if !_tgt.Equals(_src11) {
+			return false
+		}
+	}
+	if p.SeqNo != other.SeqNo {
+		if p.SeqNo == nil || other.SeqNo == nil {
+			return false
+		}
+		if (*p.SeqNo) != (*other.SeqNo) {
+			return false
+		}
+	}
+	if !p.Stats.Equals(other.Stats) {
+		return false
+	}
+	return true
+}
+
+func (p *Batch) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Batch(%+v)", *p)
+}
+
+// Attributes:
+//  - Ok
+type BatchSubmitResponse struct {
+	Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+	return &BatchSubmitResponse{}
+}
+
+func (p *BatchSubmitResponse) GetOk() bool {
+	return p.Ok
+}
+func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetOk bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.BOOL {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetOk = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetOk {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+	}
+	return nil
+}
+
+func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Ok = v
+	}
+	return nil
+}
+
+func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+	}
+	if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+	}
+	return err
+}
+
+func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Ok != other.Ok {
+		return false
+	}
+	return true
+}
+
+func (p *BatchSubmitResponse) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
+
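+// Collector is the generated Go interface for the Thrift collector
+// service. The compiler emits both a client implementation
+// (CollectorClient) and a server-side dispatcher (CollectorProcessor)
+// for it below.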
+type Collector interface {
+	// Parameters:
+	//  - Batches
+	SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
+}
+
+type CollectorClient struct {
+	c    thrift.TClient
+	meta thrift.ResponseMeta
+}
+
+func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
+	return &CollectorClient{
+		c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+	}
+}
+
+func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
+	return &CollectorClient{
+		c: thrift.NewTStandardClient(iprot, oprot),
+	}
+}
+
+func NewCollectorClient(c thrift.TClient) *CollectorClient {
+	return &CollectorClient{
+		c: c,
+	}
+}
+
+func (p *CollectorClient) Client_() thrift.TClient {
+	return p.c
+}
+
+func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+	return p.meta
+}
+
+func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+	p.meta = meta
+}
+
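+// A CollectorClient is normally wired up from a transport and a protocol
+// factory. The following is a minimal sketch only; the HTTP transport,
+// endpoint URL, and binary protocol are deployment-specific assumptions,
+// not something this file prescribes:
+//
+//	trans, err := thrift.NewTHttpClient("http://collector:14268/api/traces")
+//	if err != nil {
+//		// handle transport construction error
+//	}
+//	client := NewCollectorClientFactory(trans, thrift.NewTBinaryProtocolFactoryConf(nil))
+//	responses, err := client.SubmitBatches(context.Background(), batches)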
+// Parameters:
+//  - Batches
+func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
+	var _args12 CollectorSubmitBatchesArgs
+	_args12.Batches = batches
+	var _result14 CollectorSubmitBatchesResult
+	var _meta13 thrift.ResponseMeta
+	_meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
+	p.SetLastResponseMeta_(_meta13)
+	if _err != nil {
+		return
+	}
+	return _result14.GetSuccess(), nil
+}
+
+type CollectorProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      Collector
+}
+
+func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewCollectorProcessor(handler Collector) *CollectorProcessor {
+
+	self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler}
+	return self15
+}
+
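+// Process reads one incoming message, looks the method name up in the
+// processor map, and delegates to the matching handler; unknown methods
+// are answered with an UNKNOWN_METHOD application exception after the
+// offending payload has been skipped.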
+func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+	if err2 != nil {
+		return false, thrift.WrapTException(err2)
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(ctx, seqId, iprot, oprot)
+	}
+	iprot.Skip(ctx, thrift.STRUCT)
+	iprot.ReadMessageEnd(ctx)
+	x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+	x16.Write(ctx, oprot)
+	oprot.WriteMessageEnd(ctx)
+	oprot.Flush(ctx)
+	return false, x16
+
+}
+
+type collectorProcessorSubmitBatches struct {
+	handler Collector
+}
+
+func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := CollectorSubmitBatchesArgs{}
+	var err2 error
+	if err2 = args.Read(ctx, iprot); err2 != nil {
+		iprot.ReadMessageEnd(ctx)
+		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+		oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+		x.Write(ctx, oprot)
+		oprot.WriteMessageEnd(ctx)
+		oprot.Flush(ctx)
+		return false, thrift.WrapTException(err2)
+	}
+	iprot.ReadMessageEnd(ctx)
+
+	tickerCancel := func() {}
+	// Start a goroutine to do server side connectivity check.
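+	// While the handler runs, the ticker periodically checks whether the
+	// input transport is still open; if the client has gone away, the
+	// request context is cancelled so the handler can abandon the request
+	// early (see the thrift.ErrAbandonRequest check below).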
+	if thrift.ServerConnectivityCheckInterval > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithCancel(ctx)
+		defer cancel()
+		var tickerCtx context.Context
+		tickerCtx, tickerCancel = context.WithCancel(context.Background())
+		defer tickerCancel()
+		go func(ctx context.Context, cancel context.CancelFunc) {
+			ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+			defer ticker.Stop()
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-ticker.C:
+					if !iprot.Transport().IsOpen() {
+						cancel()
+						return
+					}
+				}
+			}
+		}(tickerCtx, cancel)
+	}
+
+	result := CollectorSubmitBatchesResult{}
+	var retval []*BatchSubmitResponse
+	if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
+		tickerCancel()
+		if err2 == thrift.ErrAbandonRequest {
+			return false, thrift.WrapTException(err2)
+		}
+		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error())
+		oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+		x.Write(ctx, oprot)
+		oprot.WriteMessageEnd(ctx)
+		oprot.Flush(ctx)
+		return true, thrift.WrapTException(err2)
+	} else {
+		result.Success = retval
+	}
+	tickerCancel()
+	if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err != nil {
+		return
+	}
+	return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Batches
+type CollectorSubmitBatchesArgs struct {
+	Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
+}
+
+func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
+	return &CollectorSubmitBatchesArgs{}
+}
+
+func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
+	return p.Batches
+}
+func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Batch, 0, size)
+	p.Batches = tSlice
+	for i := 0; i < size; i++ {
+		_elem17 := &Batch{}
+		if err := _elem17.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
+		}
+		p.Batches = append(p.Batches, _elem17)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Batches {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err)
+	}
+	return err
+}
+
+func (p *CollectorSubmitBatchesArgs) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type CollectorSubmitBatchesResult struct {
+	Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
+	return &CollectorSubmitBatchesResult{}
+}
+
+var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
+
+func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
+	return p.Success
+}
+func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
+	return p.Success != nil
+}
+
+func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 0:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField0(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*BatchSubmitResponse, 0, size)
+	p.Success = tSlice
+	for i := 0; i < size; i++ {
+		_elem18 := &BatchSubmitResponse{}
+		if err := _elem18.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
+		}
+		p.Success = append(p.Success, _elem18)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField0(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetSuccess() {
+		if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+		}
+		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Success {
+			if err := v.Write(ctx, oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(ctx); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *CollectorSubmitBatchesResult) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 000000000..ebf43018f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
new file mode 100644
index 000000000..043ecba96
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
@@ -0,0 +1,39 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const MESSAGE_SEND = "ms"
+const MESSAGE_RECV = "mr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const LOCAL_COMPONENT = "lc"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+const MESSAGE_ADDR = "ma"
+
+func init() {
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
new file mode 100644
index 000000000..7f46810e0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
@@ -0,0 +1,2067 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import (
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type AnnotationType int64
+
+const (
+	AnnotationType_BOOL   AnnotationType = 0
+	AnnotationType_BYTES  AnnotationType = 1
+	AnnotationType_I16    AnnotationType = 2
+	AnnotationType_I32    AnnotationType = 3
+	AnnotationType_I64    AnnotationType = 4
+	AnnotationType_DOUBLE AnnotationType = 5
+	AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+	switch p {
+	case AnnotationType_BOOL:
+		return "BOOL"
+	case AnnotationType_BYTES:
+		return "BYTES"
+	case AnnotationType_I16:
+		return "I16"
+	case AnnotationType_I32:
+		return "I32"
+	case AnnotationType_I64:
+		return "I64"
+	case AnnotationType_DOUBLE:
+		return "DOUBLE"
+	case AnnotationType_STRING:
+		return "STRING"
+	}
+	return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+	switch s {
+	case "BOOL":
+		return AnnotationType_BOOL, nil
+	case "BYTES":
+		return AnnotationType_BYTES, nil
+	case "I16":
+		return AnnotationType_I16, nil
+	case "I32":
+		return AnnotationType_I32, nil
+	case "I64":
+		return AnnotationType_I64, nil
+	case "DOUBLE":
+		return AnnotationType_DOUBLE, nil
+	case "STRING":
+		return AnnotationType_STRING, nil
+	}
+	return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+	return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+	q, err := AnnotationTypeFromString(string(text))
+	if err != nil {
+		return err
+	}
+	*p = q
+	return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+	v, ok := value.(int64)
+	if !ok {
+		return errors.New("Scan value is not int64")
+	}
+	*p = AnnotationType(v)
+	return nil
+}
+
+func (p *AnnotationType) Value() (driver.Value, error) {
+	if p == nil {
+		return nil, nil
+	}
+	return int64(*p), nil
+}
+
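+// annotationTypeRoundTrip is an editor's illustrative sketch (not part of the
+// Thrift-generated output; the function name is hypothetical): it shows the
+// MarshalText/UnmarshalText pair defined above round-tripping a value.
+func annotationTypeRoundTrip() (AnnotationType, error) {
+	text, _ := AnnotationType_STRING.MarshalText() // "STRING"
+	var at AnnotationType
+	if err := at.UnmarshalText(text); err != nil {
+		return AnnotationType(0), err
+	}
+	return at, nil // at == AnnotationType_STRING
+}
+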
+// Indicates the network context of a service recording an annotation, with two
+// exceptions.
+//
+// When this is part of a BinaryAnnotation whose key is CLIENT_ADDR or
+// SERVER_ADDR, the endpoint indicates the source or destination of an RPC. This
+// exception allows zipkin to display the network context of uninstrumented
+// services, or clients such as web browsers.
+//
+// Attributes:
+//  - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// E.g. for the IP 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+//  - Port: IPv4 port
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//
+// Conventionally, when the port isn't known, port = 0.
+//  - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+//  - Ipv6: IPv6 host address packed into 16 bytes, e.g. Inet6Address.getBytes()
+type Endpoint struct {
+	Ipv4        int32  `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
+	Port        int16  `thrift:"port,2" db:"port" json:"port"`
+	ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
+	Ipv6        []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
+}
+
+func NewEndpoint() *Endpoint {
+	return &Endpoint{}
+}
+
+func (p *Endpoint) GetIpv4() int32 {
+	return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+	return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+	return p.ServiceName
+}
+
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+	return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+	return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I32 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.I16 {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Ipv4 = v
+	}
+	return nil
+}
+
+func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI16(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Port = v
+	}
+	return nil
+}
+
+func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.ServiceName = v
+	}
+	return nil
+}
+
+func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBinary(ctx); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.Ipv6 = v
+	}
+	return nil
+}
+
+func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
+	}
+	if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
+	}
+	return err
+}
+
+func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
+	}
+	if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
+	}
+	return err
+}
+
+func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
+	}
+	return err
+}
+
+func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetIpv6() {
+		if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
+		}
+		if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Endpoint) Equals(other *Endpoint) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Ipv4 != other.Ipv4 {
+		return false
+	}
+	if p.Port != other.Port {
+		return false
+	}
+	if p.ServiceName != other.ServiceName {
+		return false
+	}
+	if bytes.Compare(p.Ipv6, other.Ipv6) != 0 {
+		return false
+	}
+	return true
+}
+
+func (p *Endpoint) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
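+// exampleEndpoint is an editor's illustrative sketch (not part of the
+// Thrift-generated output; the function name and values are hypothetical): it
+// packs the address 1.2.3.4 into the Ipv4 field exactly as the struct comment
+// describes, i.e. (1 << 24) | (2 << 16) | (3 << 8) | 4.
+func exampleEndpoint() *Endpoint {
+	ep := NewEndpoint()
+	ep.Ipv4 = int32(1)<<24 | int32(2)<<16 | int32(3)<<8 | int32(4)
+	ep.Port = 8080                // conventionally 0 when unknown
+	ep.ServiceName = "zipkin-web" // lowercase by convention
+	return ep
+}
+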
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly, and also aggregated.
+//
+// Attributes:
+//  - Timestamp: Microseconds from epoch.
+//
+// This should be the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+//  - Value
+//  - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+	Timestamp int64     `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
+	Value     string    `thrift:"value,2" db:"value" json:"value"`
+	Host      *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+	return &Annotation{}
+}
+
+func (p *Annotation) GetTimestamp() int64 {
+	return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+	return p.Value
+}
+
+var Annotation_Host_DEFAULT *Endpoint
+
+func (p *Annotation) GetHost() *Endpoint {
+	if !p.IsSetHost() {
+		return Annotation_Host_DEFAULT
+	}
+	return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+	return p.Host != nil
+}
+
+func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.STRUCT {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Timestamp = v
+	}
+	return nil
+}
+
+func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Value = v
+	}
+	return nil
+}
+
+func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	p.Host = &Endpoint{}
+	if err := p.Host.Read(ctx, iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+	}
+	return nil
+}
+
+func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
+	}
+	return err
+}
+
+func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+	}
+	return err
+}
+
+func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetHost() {
+		if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
+		}
+		if err := p.Host.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Annotation) Equals(other *Annotation) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Timestamp != other.Timestamp {
+		return false
+	}
+	if p.Value != other.Value {
+		return false
+	}
+	if !p.Host.Equals(other.Host) {
+		return false
+	}
+	return true
+}
+
+func (p *Annotation) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
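+// exampleAnnotation is an editor's illustrative sketch (not part of the
+// Thrift-generated output; it builds on the hypothetical exampleEndpoint
+// above): it records a CLIENT_SEND event using the microseconds-from-epoch
+// timestamp convention documented on the struct.
+func exampleAnnotation() *Annotation {
+	a := NewAnnotation()
+	a.Timestamp = time.Now().UnixNano() / int64(time.Microsecond)
+	a.Value = CLIENT_SEND // "cs", from zipkincore-consts.go
+	a.Host = exampleEndpoint()
+	return a
+}
+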
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource
+// in an RPC call.
+//
+// Binary annotations of type STRING are always queryable, though this is more
+// a historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+//  - Key
+//  - Value
+//  - AnnotationType
+//  - Host: The host that recorded the tag, which allows you to differentiate
+// between multiple tags with the same key. There are two exceptions to this.
+//
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display the network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+	Key            string         `thrift:"key,1" db:"key" json:"key"`
+	Value          []byte         `thrift:"value,2" db:"value" json:"value"`
+	AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
+	Host           *Endpoint      `thrift:"host,4" db:"host" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+	return &BinaryAnnotation{}
+}
+
+func (p *BinaryAnnotation) GetKey() string {
+	return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+	return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+	return p.AnnotationType
+}
+
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+	if !p.IsSetHost() {
+		return BinaryAnnotation_Host_DEFAULT
+	}
+	return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+	return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField2(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.I32 {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.STRUCT {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Key = v
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBinary(ctx); err != nil {
+		return thrift.PrependError("error reading field 2: ", err)
+	} else {
+		p.Value = v
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI32(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		temp := AnnotationType(v)
+		p.AnnotationType = temp
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	p.Host = &Endpoint{}
+	if err := p.Host.Read(ctx, iprot); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField2(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
+	}
+	if err := oprot.WriteBinary(ctx, p.Value); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
+	}
+	if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetHost() {
+		if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
+		}
+		if err := p.Host.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Key != other.Key {
+		return false
+	}
+	if bytes.Compare(p.Value, other.Value) != 0 {
+		return false
+	}
+	if p.AnnotationType != other.AnnotationType {
+		return false
+	}
+	if !p.Host.Equals(other.Host) {
+		return false
+	}
+	return true
+}
+
+func (p *BinaryAnnotation) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
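+// exampleBinaryAnnotation is an editor's illustrative sketch (not part of the
+// Thrift-generated output; the function name and values are hypothetical): it
+// tags a span with an "http.uri" key of type STRING, as in the struct comment.
+func exampleBinaryAnnotation() *BinaryAnnotation {
+	b := NewBinaryAnnotation()
+	b.Key = "http.uri"
+	b.Value = []byte("/api/v1/myresource")
+	b.AnnotationType = AnnotationType_STRING
+	b.Host = exampleEndpoint()
+	return b
+}
+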
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+//
+// Attributes:
+//  - TraceID
+//  - Name: Span name in lowercase, e.g. the rpc method
+//
+// Conventionally, when the span name isn't known, name = "unknown".
+//  - ID
+//  - ParentID
+//  - Annotations
+//  - BinaryAnnotations
+//  - Debug
+//  - Timestamp: Microseconds from epoch of the creation of this span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that predates this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+//  - Duration: Measurement of duration in microseconds, used to support queries.
+//
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+//
+// For compatibility with instrumentation that predates this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If
+// non-zero, the trace uses 128-bit trace ids instead of 64-bit ones.
+type Span struct {
+	TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
+	// unused field # 2
+	Name        string        `thrift:"name,3" db:"name" json:"name"`
+	ID          int64         `thrift:"id,4" db:"id" json:"id"`
+	ParentID    *int64        `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
+	Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
+	// unused field # 7
+	BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
+	Debug             bool                `thrift:"debug,9" db:"debug" json:"debug"`
+	Timestamp         *int64              `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
+	Duration          *int64              `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
+	TraceIDHigh       *int64              `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
+}
+
+func NewSpan() *Span {
+	return &Span{}
+}
+
+func (p *Span) GetTraceID() int64 {
+	return p.TraceID
+}
+
+func (p *Span) GetName() string {
+	return p.Name
+}
+
+func (p *Span) GetID() int64 {
+	return p.ID
+}
+
+var Span_ParentID_DEFAULT int64
+
+func (p *Span) GetParentID() int64 {
+	if !p.IsSetParentID() {
+		return Span_ParentID_DEFAULT
+	}
+	return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+	return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+	return p.BinaryAnnotations
+}
+
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+	return p.Debug
+}
+
+var Span_Timestamp_DEFAULT int64
+
+func (p *Span) GetTimestamp() int64 {
+	if !p.IsSetTimestamp() {
+		return Span_Timestamp_DEFAULT
+	}
+	return *p.Timestamp
+}
+
+var Span_Duration_DEFAULT int64
+
+func (p *Span) GetDuration() int64 {
+	if !p.IsSetDuration() {
+		return Span_Duration_DEFAULT
+	}
+	return *p.Duration
+}
+
+var Span_TraceIDHigh_DEFAULT int64
+
+func (p *Span) GetTraceIDHigh() int64 {
+	if !p.IsSetTraceIDHigh() {
+		return Span_TraceIDHigh_DEFAULT
+	}
+	return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+	return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+	return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+	return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+	return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+	return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 3:
+			if fieldTypeId == thrift.STRING {
+				if err := p.ReadField3(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 4:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField4(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 5:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField5(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 6:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField6(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 8:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField8(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 9:
+			if fieldTypeId == thrift.BOOL {
+				if err := p.ReadField9(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 10:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField10(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 11:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField11(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		case 12:
+			if fieldTypeId == thrift.I64 {
+				if err := p.ReadField12(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.TraceID = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadString(ctx); err != nil {
+		return thrift.PrependError("error reading field 3: ", err)
+	} else {
+		p.Name = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 4: ", err)
+	} else {
+		p.ID = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 5: ", err)
+	} else {
+		p.ParentID = &v
+	}
+	return nil
+}
+
+func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Annotation, 0, size)
+	p.Annotations = tSlice
+	for i := 0; i < size; i++ {
+		_elem0 := &Annotation{}
+		if err := _elem0.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+		}
+		p.Annotations = append(p.Annotations, _elem0)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*BinaryAnnotation, 0, size)
+	p.BinaryAnnotations = tSlice
+	for i := 0; i < size; i++ {
+		_elem1 := &BinaryAnnotation{}
+		if err := _elem1.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+		}
+		p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(ctx); err != nil {
+		return thrift.PrependError("error reading field 9: ", err)
+	} else {
+		p.Debug = v
+	}
+	return nil
+}
+
+func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 10: ", err)
+	} else {
+		p.Timestamp = &v
+	}
+	return nil
+}
+
+func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 11: ", err)
+	} else {
+		p.Duration = &v
+	}
+	return nil
+}
+
+func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadI64(ctx); err != nil {
+		return thrift.PrependError("error reading field 12: ", err)
+	} else {
+		p.TraceIDHigh = &v
+	}
+	return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField3(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField4(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField5(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField6(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField8(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField9(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField10(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField11(ctx, oprot); err != nil {
+			return err
+		}
+		if err := p.writeField12(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
+	}
+	if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
+	}
+	if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetParentID() {
+		if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
+		}
+		if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Annotations {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.BinaryAnnotations {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
+	}
+	return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetDebug() {
+		if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
+		}
+		if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetTimestamp() {
+		if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
+		}
+		if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetDuration() {
+		if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
+		}
+		if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetTraceIDHigh() {
+		if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
+		}
+		if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.TraceID != other.TraceID {
+		return false
+	}
+	if p.Name != other.Name {
+		return false
+	}
+	if p.ID != other.ID {
+		return false
+	}
+	if p.ParentID != other.ParentID {
+		if p.ParentID == nil || other.ParentID == nil {
+			return false
+		}
+		if (*p.ParentID) != (*other.ParentID) {
+			return false
+		}
+	}
+	if len(p.Annotations) != len(other.Annotations) {
+		return false
+	}
+	for i, _tgt := range p.Annotations {
+		_src2 := other.Annotations[i]
+		if !_tgt.Equals(_src2) {
+			return false
+		}
+	}
+	if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) {
+		return false
+	}
+	for i, _tgt := range p.BinaryAnnotations {
+		_src3 := other.BinaryAnnotations[i]
+		if !_tgt.Equals(_src3) {
+			return false
+		}
+	}
+	if p.Debug != other.Debug {
+		return false
+	}
+	if p.Timestamp != other.Timestamp {
+		if p.Timestamp == nil || other.Timestamp == nil {
+			return false
+		}
+		if (*p.Timestamp) != (*other.Timestamp) {
+			return false
+		}
+	}
+	if p.Duration != other.Duration {
+		if p.Duration == nil || other.Duration == nil {
+			return false
+		}
+		if (*p.Duration) != (*other.Duration) {
+			return false
+		}
+	}
+	if p.TraceIDHigh != other.TraceIDHigh {
+		if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
+			return false
+		}
+		if (*p.TraceIDHigh) != (*other.TraceIDHigh) {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Span) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Span(%+v)", *p)
+}
+
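+// exampleSpan is an editor's illustrative sketch (not part of the
+// Thrift-generated output; names and values are hypothetical): it assembles a
+// root span (trace_id == id, parent_id unset) and shows how the optional
+// pointer fields Timestamp and Duration are populated.
+func exampleSpan() *Span {
+	ts := time.Now().UnixNano() / int64(time.Microsecond)
+	dur := int64(1500) // microseconds
+	s := NewSpan()
+	s.TraceID = 0x1234
+	s.Name = "get /api/v1/myresource" // lowercase by convention
+	s.ID = 0x1234
+	s.Annotations = []*Annotation{exampleAnnotation()}
+	s.BinaryAnnotations = []*BinaryAnnotation{exampleBinaryAnnotation()}
+	s.Timestamp = &ts
+	s.Duration = &dur
+	return s
+}
+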
+// Attributes:
+//  - Ok
+type Response struct {
+	Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewResponse() *Response {
+	return &Response{}
+}
+
+func (p *Response) GetOk() bool {
+	return p.Ok
+}
+func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	var issetOk bool = false
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.BOOL {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+				issetOk = true
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	if !issetOk {
+		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+	}
+	return nil
+}
+
+func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	if v, err := iprot.ReadBool(ctx); err != nil {
+		return thrift.PrependError("error reading field 1: ", err)
+	} else {
+		p.Ok = v
+	}
+	return nil
+}
+
+func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
+	}
+	if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
+	}
+	return err
+}
+
+func (p *Response) Equals(other *Response) bool {
+	if p == other {
+		return true
+	} else if p == nil || other == nil {
+		return false
+	}
+	if p.Ok != other.Ok {
+		return false
+	}
+	return true
+}
+
+func (p *Response) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("Response(%+v)", *p)
+}
+
+type ZipkinCollector interface {
+	// Parameters:
+	//  - Spans
+	SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
+}
+
+type ZipkinCollectorClient struct {
+	c    thrift.TClient
+	meta thrift.ResponseMeta
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+	return &ZipkinCollectorClient{
+		c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+	}
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+	return &ZipkinCollectorClient{
+		c: thrift.NewTStandardClient(iprot, oprot),
+	}
+}
+
+func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
+	return &ZipkinCollectorClient{
+		c: c,
+	}
+}
+
+func (p *ZipkinCollectorClient) Client_() thrift.TClient {
+	return p.c
+}
+
+func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+	return p.meta
+}
+
+func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+	p.meta = meta
+}
+
+// Parameters:
+//  - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
+	var _args4 ZipkinCollectorSubmitZipkinBatchArgs
+	_args4.Spans = spans
+	var _result6 ZipkinCollectorSubmitZipkinBatchResult
+	var _meta5 thrift.ResponseMeta
+	_meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
+	p.SetLastResponseMeta_(_meta5)
+	if _err != nil {
+		return
+	}
+	return _result6.GetSuccess(), nil
+}
+
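+// submitExample is an editor's illustrative sketch (not part of the
+// Thrift-generated output; the function name is hypothetical and the
+// thrift.TClient is assumed to be supplied by the caller): it wires up a
+// ZipkinCollectorClient and submits a single-span batch.
+func submitExample(ctx context.Context, c thrift.TClient) ([]*Response, error) {
+	client := NewZipkinCollectorClient(c)
+	return client.SubmitZipkinBatch(ctx, []*Span{exampleSpan()})
+}
+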
+type ZipkinCollectorProcessor struct {
+	processorMap map[string]thrift.TProcessorFunction
+	handler      ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+	p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+	processor, ok = p.processorMap[key]
+	return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+	return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+	self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+	self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
+	return self7
+}
+
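+// noopCollector is an editor's illustrative sketch (not part of the
+// Thrift-generated output; the type is hypothetical): a minimal
+// ZipkinCollector implementation that acknowledges every span, e.g. for
+// exercising NewZipkinCollectorProcessor in tests.
+type noopCollector struct{}
+
+func (noopCollector) SubmitZipkinBatch(ctx context.Context, spans []*Span) ([]*Response, error) {
+	resps := make([]*Response, len(spans))
+	for i := range spans {
+		resps[i] = &Response{Ok: true}
+	}
+	return resps, nil
+}
+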
+func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+	if err2 != nil {
+		return false, thrift.WrapTException(err2)
+	}
+	if processor, ok := p.GetProcessorFunction(name); ok {
+		return processor.Process(ctx, seqId, iprot, oprot)
+	}
+	iprot.Skip(ctx, thrift.STRUCT)
+	iprot.ReadMessageEnd(ctx)
+	x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+	oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+	x8.Write(ctx, oprot)
+	oprot.WriteMessageEnd(ctx)
+	oprot.Flush(ctx)
+	return false, x8
+
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+	handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+	args := ZipkinCollectorSubmitZipkinBatchArgs{}
+	var err2 error
+	if err2 = args.Read(ctx, iprot); err2 != nil {
+		iprot.ReadMessageEnd(ctx)
+		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+		oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+		x.Write(ctx, oprot)
+		oprot.WriteMessageEnd(ctx)
+		oprot.Flush(ctx)
+		return false, thrift.WrapTException(err2)
+	}
+	iprot.ReadMessageEnd(ctx)
+
+	tickerCancel := func() {}
+	// Start a goroutine to do server side connectivity check.
+	if thrift.ServerConnectivityCheckInterval > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithCancel(ctx)
+		defer cancel()
+		var tickerCtx context.Context
+		tickerCtx, tickerCancel = context.WithCancel(context.Background())
+		defer tickerCancel()
+		go func(ctx context.Context, cancel context.CancelFunc) {
+			ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+			defer ticker.Stop()
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-ticker.C:
+					if !iprot.Transport().IsOpen() {
+						cancel()
+						return
+					}
+				}
+			}
+		}(tickerCtx, cancel)
+	}
+
+	result := ZipkinCollectorSubmitZipkinBatchResult{}
+	var retval []*Response
+	if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
+		tickerCancel()
+		if err2 == thrift.ErrAbandonRequest {
+			return false, thrift.WrapTException(err2)
+		}
+		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
+		oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+		x.Write(ctx, oprot)
+		oprot.WriteMessageEnd(ctx)
+		oprot.Flush(ctx)
+		return true, thrift.WrapTException(err2)
+	} else {
+		result.Success = retval
+	}
+	tickerCancel()
+	if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+		err = thrift.WrapTException(err2)
+	}
+	if err != nil {
+		return
+	}
+	return true, err
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+	Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+	return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+	return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 1:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField1(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Span, 0, size)
+	p.Spans = tSlice
+	for i := 0; i < size; i++ {
+		_elem9 := &Span{}
+		if err := _elem9.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
+		}
+		p.Spans = append(p.Spans, _elem9)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField1(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
+	}
+	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+		return thrift.PrependError("error writing list begin: ", err)
+	}
+	for _, v := range p.Spans {
+		if err := v.Write(ctx, oprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+		}
+	}
+	if err := oprot.WriteListEnd(ctx); err != nil {
+		return thrift.PrependError("error writing list end: ", err)
+	}
+	if err := oprot.WriteFieldEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
+	}
+	return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+	Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+	return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+	return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+	return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+	if _, err := iprot.ReadStructBegin(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+	}
+
+	for {
+		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+		}
+		if fieldTypeId == thrift.STOP {
+			break
+		}
+		switch fieldId {
+		case 0:
+			if fieldTypeId == thrift.LIST {
+				if err := p.ReadField0(ctx, iprot); err != nil {
+					return err
+				}
+			} else {
+				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+					return err
+				}
+			}
+		default:
+			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+				return err
+			}
+		}
+		if err := iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+	_, size, err := iprot.ReadListBegin(ctx)
+	if err != nil {
+		return thrift.PrependError("error reading list begin: ", err)
+	}
+	tSlice := make([]*Response, 0, size)
+	p.Success = tSlice
+	for i := 0; i < size; i++ {
+		_elem10 := &Response{}
+		if err := _elem10.Read(ctx, iprot); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+		}
+		p.Success = append(p.Success, _elem10)
+	}
+	if err := iprot.ReadListEnd(ctx); err != nil {
+		return thrift.PrependError("error reading list end: ", err)
+	}
+	return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+	if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
+		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+	}
+	if p != nil {
+		if err := p.writeField0(ctx, oprot); err != nil {
+			return err
+		}
+	}
+	if err := oprot.WriteFieldStop(ctx); err != nil {
+		return thrift.PrependError("write field stop error: ", err)
+	}
+	if err := oprot.WriteStructEnd(ctx); err != nil {
+		return thrift.PrependError("write struct stop error: ", err)
+	}
+	return nil
+}
+
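+// writeField0 emits the optional success list (field id 0) only when it has
+// been set, as is conventional for thrift result structs.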
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+	if p.IsSetSuccess() {
+		if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
+		}
+		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+			return thrift.PrependError("error writing list begin: ", err)
+		}
+		for _, v := range p.Success {
+			if err := v.Write(ctx, oprot); err != nil {
+				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+			}
+		}
+		if err := oprot.WriteListEnd(ctx); err != nil {
+			return thrift.PrependError("error writing list end: ", err)
+		}
+		if err := oprot.WriteFieldEnd(ctx); err != nil {
+			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
+		}
+	}
+	return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE
new file mode 100644
index 000000000..2bc6fbbf6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE
@@ -0,0 +1,306 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+--------------------------------------------------
+SOFTWARE DISTRIBUTED WITH THRIFT:
+
+The Apache Thrift software includes a number of subcomponents with
+separate copyright notices and license terms. Your use of the source
+code for these subcomponents is subject to the terms and
+conditions of the following licenses.
+
+--------------------------------------------------
+Portions of the following files are licensed under the MIT License:
+
+  lib/erl/src/Makefile.am
+
+Please see doc/otp-base-license.txt for the full terms of this license.
+
+--------------------------------------------------
+For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
+
+#   Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
+#
+#   Copying and distribution of this file, with or without
+#   modification, are permitted in any medium without royalty provided
+#   the copyright notice and this notice are preserved.
+
+--------------------------------------------------
+For the lib/nodejs/lib/thrift/json_parse.js:
+
+/*
+    json_parse.js
+    2015-05-02
+    Public Domain.
+    NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+*/
+(By Douglas Crockford <douglas@crockford.com>)
+
+--------------------------------------------------
+For lib/cpp/src/thrift/windows/SocketPair.cpp
+
+/* socketpair.c
+ * Copyright 2007 by Nathan C. Myers <ncm@cantrip.org>; some rights reserved.
+ * This code is Free Software.  It may be copied freely, in original or
+ * modified form, subject only to the restrictions that (1) the author is
+ * relieved from all responsibilities for any use for any purpose, and (2)
+ * this copyright notice must be retained, unchanged, in its entirety.  If
+ * for any reason the author might be held responsible for any consequences
+ * of copying or use, license is withheld.
+ */
+
+
+--------------------------------------------------
+For lib/py/compat/win32/stdint.h
+
+// ISO C9x  compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+//  Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   1. Redistributions of source code must retain the above copyright notice,
+//      this list of conditions and the following disclaimer.
+//
+//   2. Redistributions in binary form must reproduce the above copyright
+//      notice, this list of conditions and the following disclaimer in the
+//      documentation and/or other materials provided with the distribution.
+//
+//   3. The name of the author may be used to endorse or promote products
+//      derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+--------------------------------------------------
+Codegen template in t_html_generator.h
+
+* Bootstrap v2.0.3
+*
+* Copyright 2012 Twitter, Inc
+* Licensed under the Apache License v2.0
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Designed and built with all the love in the world @twitter by @mdo and @fat.
+
+---------------------------------------------------
+For t_cl_generator.cc
+
+ * Copyright (c) 2008- Patrick Collison <patrick@collison.ie>
+ * Copyright (c) 2006- Facebook
+
+---------------------------------------------------
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE
new file mode 100644
index 000000000..37824e7fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE
@@ -0,0 +1,5 @@
+Apache Thrift
+Copyright (C) 2006 - 2019, The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
new file mode 100644
index 000000000..32d5b0147
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
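+// Application-level exception type ids, as defined by the Thrift protocol.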
+const (
+	UNKNOWN_APPLICATION_EXCEPTION  = 0
+	UNKNOWN_METHOD                 = 1
+	INVALID_MESSAGE_TYPE_EXCEPTION = 2
+	WRONG_METHOD_NAME              = 3
+	BAD_SEQUENCE_ID                = 4
+	MISSING_RESULT                 = 5
+	INTERNAL_ERROR                 = 6
+	PROTOCOL_ERROR                 = 7
+	INVALID_TRANSFORM              = 8
+	INVALID_PROTOCOL               = 9
+	UNSUPPORTED_CLIENT_TYPE        = 10
+)
+
+var defaultApplicationExceptionMessage = map[int32]string{
+	UNKNOWN_APPLICATION_EXCEPTION:  "unknown application exception",
+	UNKNOWN_METHOD:                 "unknown method",
+	INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
+	WRONG_METHOD_NAME:              "wrong method name",
+	BAD_SEQUENCE_ID:                "bad sequence ID",
+	MISSING_RESULT:                 "missing result",
+	INTERNAL_ERROR:                 "unknown internal error",
+	PROTOCOL_ERROR:                 "unknown protocol error",
+	INVALID_TRANSFORM:              "Invalid transform",
+	INVALID_PROTOCOL:               "Invalid protocol",
+	UNSUPPORTED_CLIENT_TYPE:        "Unsupported client type",
+}
+
+// Application level Thrift exception
+type TApplicationException interface {
+	TException
+	TypeId() int32
+	Read(ctx context.Context, iprot TProtocol) error
+	Write(ctx context.Context, oprot TProtocol) error
+}
+
+type tApplicationException struct {
+	message string
+	type_   int32
+}
+
+var _ TApplicationException = (*tApplicationException)(nil)
+
+func (tApplicationException) TExceptionType() TExceptionType {
+	return TExceptionTypeApplication
+}
+
+func (e tApplicationException) Error() string {
+	if e.message != "" {
+		return e.message
+	}
+	return defaultApplicationExceptionMessage[e.type_]
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+	return &tApplicationException{message, type_}
+}
+
+func (p *tApplicationException) TypeId() int32 {
+	return p.type_
+}
+
+func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
+	// TODO: this should really be generated by the compiler
+	_, err := iprot.ReadStructBegin(ctx)
+	if err != nil {
+		return err
+	}
+
+	message := ""
+	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+	for {
+		_, ttype, id, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return err
+		}
+		if ttype == STOP {
+			break
+		}
+		switch id {
+		case 1:
+			if ttype == STRING {
+				if message, err = iprot.ReadString(ctx); err != nil {
+					return err
+				}
+			} else {
+				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if ttype == I32 {
+				if type_, err = iprot.ReadI32(ctx); err != nil {
+					return err
+				}
+			} else {
+				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+					return err
+				}
+			}
+		default:
+			if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+				return err
+			}
+		}
+		if err = iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return err
+	}
+
+	p.message = message
+	p.type_ = type_
+
+	return nil
+}
+
+func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
+	err = oprot.WriteStructBegin(ctx, "TApplicationException")
+	if err != nil {
+		return
+	}
+	if len(p.Error()) > 0 {
+		err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
+		if err != nil {
+			return
+		}
+		err = oprot.WriteString(ctx, p.Error())
+		if err != nil {
+			return
+		}
+		err = oprot.WriteFieldEnd(ctx)
+		if err != nil {
+			return
+		}
+	}
+	err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteI32(ctx, p.type_)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldEnd(ctx)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldStop(ctx)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteStructEnd(ctx)
+	return
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
new file mode 100644
index 000000000..45c880d32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+type TBinaryProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+	cfg           *TConfiguration
+	buffer        [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTBinaryProtocolConf instead.
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+	return NewTBinaryProtocolConf(t, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTBinaryProtocolConf instead.
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+	return NewTBinaryProtocolConf(t, &TConfiguration{
+		TBinaryStrictRead:  &strictRead,
+		TBinaryStrictWrite: &strictWrite,
+
+		noPropagation: true,
+	})
+}
+
+func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
+	PropagateTConfiguration(t, conf)
+	p := &TBinaryProtocol{
+		origTransport: t,
+		cfg:           conf,
+	}
+	if et, ok := t.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(t)
+	}
+	return p
+}
+
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+	return NewTBinaryProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+	return NewTBinaryProtocolFactoryConf(&TConfiguration{
+		TBinaryStrictRead:  &strictRead,
+		TBinaryStrictWrite: &strictWrite,
+
+		noPropagation: true,
+	})
+}
+
+func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
+	return &TBinaryProtocolFactory{
+		cfg: conf,
+	}
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+	return NewTBinaryProtocolConf(t, p.cfg)
+}
+
+func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
+}
+
+/**
+ * Writing Methods
+ */
+
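+// WriteMessageBegin emits either the strict header (a version word with the
+// message type folded into its low bits, then the name, then the sequence
+// id) or the legacy header (name first), depending on TBinaryStrictWrite.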
+func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+	if p.cfg.GetTBinaryStrictWrite() {
+		version := uint32(VERSION_1) | uint32(typeId)
+		e := p.WriteI32(ctx, int32(version))
+		if e != nil {
+			return e
+		}
+		e = p.WriteString(ctx, name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(ctx, seqId)
+		return e
+	} else {
+		e := p.WriteString(ctx, name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteByte(ctx, int8(typeId))
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(ctx, seqId)
+		return e
+	}
+}
+
+func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	e := p.WriteByte(ctx, int8(typeId))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI16(ctx, id)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
+	e := p.WriteByte(ctx, STOP)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	e := p.WriteByte(ctx, int8(keyType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteByte(ctx, int8(valueType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(ctx, int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	e := p.WriteByte(ctx, int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(ctx, int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	e := p.WriteByte(ctx, int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(ctx, int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
+	if value {
+		return p.WriteByte(ctx, 1)
+	}
+	return p.WriteByte(ctx, 0)
+}
+
+func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
+	e := p.trans.WriteByte(byte(value))
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
+	v := p.buffer[0:2]
+	binary.BigEndian.PutUint16(v, uint16(value))
+	_, e := p.trans.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
+	v := p.buffer[0:4]
+	binary.BigEndian.PutUint32(v, uint32(value))
+	_, e := p.trans.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
+	v := p.buffer[0:8]
+	binary.BigEndian.PutUint64(v, uint64(value))
+	_, err := p.trans.Write(v)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
+	return p.WriteI64(ctx, int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
+	e := p.WriteI32(ctx, int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.trans.WriteString(value)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	e := p.WriteI32(ctx, int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.trans.Write(value)
+	return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
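+// ReadMessageBegin accepts both strict and legacy framing: a strict message
+// starts with a negative version word, while a legacy message starts with
+// the non-negative length of the method name, so the sign of the first i32
+// distinguishes the two.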
+func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	size, e := p.ReadI32(ctx)
+	if e != nil {
+		return "", typeId, 0, NewTProtocolException(e)
+	}
+	if size < 0 {
+		typeId = TMessageType(size & 0x0ff)
+		version := int64(size) & VERSION_MASK
+		if version != VERSION_1 {
+			return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+		}
+		name, e = p.ReadString(ctx)
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		seqId, e = p.ReadI32(ctx)
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		return name, typeId, seqId, nil
+	}
+	if p.cfg.GetTBinaryStrictRead() {
+		return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+	}
+	name, e2 := p.readStringBody(size)
+	if e2 != nil {
+		return name, typeId, seqId, e2
+	}
+	b, e3 := p.ReadByte(ctx)
+	if e3 != nil {
+		return name, typeId, seqId, e3
+	}
+	typeId = TMessageType(b)
+	seqId, e4 := p.ReadI32(ctx)
+	if e4 != nil {
+		return name, typeId, seqId, e4
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
+	t, err := p.ReadByte(ctx)
+	typeId = TType(t)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if t != STOP {
+		seqId, err = p.ReadI16(ctx)
+	}
+	return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
+	k, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	kType = TType(k)
+	v, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	vType = TType(v)
+	size32, e := p.ReadI32(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	b, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	return
+}
+
+func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	b, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
+	b, e := p.ReadByte(ctx)
+	v := true
+	if b != 1 {
+		v = false
+	}
+	return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.trans.ReadByte()
+	return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	buf := p.buffer[0:2]
+	err = p.readAll(ctx, buf)
+	value = int16(binary.BigEndian.Uint16(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	buf := p.buffer[0:4]
+	err = p.readAll(ctx, buf)
+	value = int32(binary.BigEndian.Uint32(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(ctx, buf)
+	value = int64(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(ctx, buf)
+	value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
+	size, e := p.ReadI32(ctx)
+	if e != nil {
+		return "", e
+	}
+	err = checkSizeForProtocol(size, p.cfg)
+	if err != nil {
+		return
+	}
+	if size < 0 {
+		err = invalidDataLength
+		return
+	}
+	if size == 0 {
+		return "", nil
+	}
+	if size < int32(len(p.buffer)) {
+		// Avoid allocation on small reads
+		buf := p.buffer[:size]
+		read, e := io.ReadFull(p.trans, buf)
+		return string(buf[:read]), NewTProtocolException(e)
+	}
+
+	return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+	size, e := p.ReadI32(ctx)
+	if e != nil {
+		return nil, e
+	}
+	if err := checkSizeForProtocol(size, p.cfg); err != nil {
+		return nil, err
+	}
+
+	buf, err := safeReadBytes(size, p.trans)
+	return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.trans.Flush(ctx))
+}
+
+func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
+	var read int
+	_, deadlineSet := ctx.Deadline()
+	for {
+		read, err = io.ReadFull(p.trans, buf)
+		if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
+			// This is I/O timeout without anything read,
+			// and we still have time left, keep retrying.
+			continue
+		}
+		// For anything else, don't retry
+		break
+	}
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+	buf, err := safeReadBytes(size, p.trans)
+	return string(buf), NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+	PropagateTConfiguration(p.origTransport, conf)
+	p.cfg = conf
+}
+
+var (
+	_ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
+	_ TConfigurationSetter = (*TBinaryProtocol)(nil)
+)
+
+// This function is shared between TBinaryProtocol and TCompactProtocol.
+//
+// It tries to read size bytes from trans, in a way that prevents large
+// allocations when size is insanely large (mostly caused by malformed message).
+func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
+	if size < 0 {
+		return nil, nil
+	}
+
+	buf := new(bytes.Buffer)
+	_, err := io.CopyN(buf, trans, int64(size))
+	return buf.Bytes(), err
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
new file mode 100644
index 000000000..aa551b4ab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"context"
+)
+
+type TBufferedTransportFactory struct {
+	size int
+}
+
+type TBufferedTransport struct {
+	bufio.ReadWriter
+	tp TTransport
+}
+
+func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	return NewTBufferedTransport(trans, p.size), nil
+}
+
+func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
+	return &TBufferedTransportFactory{size: bufferSize}
+}
+
+func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
+	return &TBufferedTransport{
+		ReadWriter: bufio.ReadWriter{
+			Reader: bufio.NewReaderSize(trans, bufferSize),
+			Writer: bufio.NewWriterSize(trans, bufferSize),
+		},
+		tp: trans,
+	}
+}
+
+func (p *TBufferedTransport) IsOpen() bool {
+	return p.tp.IsOpen()
+}
+
+func (p *TBufferedTransport) Open() (err error) {
+	return p.tp.Open()
+}
+
+func (p *TBufferedTransport) Close() (err error) {
+	return p.tp.Close()
+}
+
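+// Read and Write delegate to the buffered ReadWriter; on error the relevant
+// buffer is reset so stale buffered data is not replayed on a later call.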
+func (p *TBufferedTransport) Read(b []byte) (int, error) {
+	n, err := p.ReadWriter.Read(b)
+	if err != nil {
+		p.ReadWriter.Reader.Reset(p.tp)
+	}
+	return n, err
+}
+
+func (p *TBufferedTransport) Write(b []byte) (int, error) {
+	n, err := p.ReadWriter.Write(b)
+	if err != nil {
+		p.ReadWriter.Writer.Reset(p.tp)
+	}
+	return n, err
+}
+
+func (p *TBufferedTransport) Flush(ctx context.Context) error {
+	if err := p.ReadWriter.Flush(); err != nil {
+		p.ReadWriter.Writer.Reset(p.tp)
+		return err
+	}
+	return p.tp.Flush(ctx)
+}
+
+func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
+	return p.tp.RemainingBytes()
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.tp, conf)
+}
+
+var _ TConfigurationSetter = (*TBufferedTransport)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
new file mode 100644
index 000000000..ea2c01fda
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
@@ -0,0 +1,109 @@
+package thrift
+
+import (
+	"context"
+	"fmt"
+)
+
+// ResponseMeta represents the metadata attached to the response.
+type ResponseMeta struct {
+	// The headers in the response, if any.
+	// If the underlying transport/protocol is not THeader, this will always be nil.
+	Headers THeaderMap
+}
+
+type TClient interface {
+	Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
+}
+
+type TStandardClient struct {
+	seqId        int32
+	iprot, oprot TProtocol
+}
+
+// NewTStandardClient returns a TStandardClient, which implements TClient
+// using the standard Thrift message format. It is not safe for concurrent
+// use.
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
+	return &TStandardClient{
+		iprot: inputProtocol,
+		oprot: outputProtocol,
+	}
+}
+
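+// Send writes a CALL message header, the marshalled args struct, and the
+// message end, then flushes the transport. THeader write headers attached
+// to the context are applied first when the protocol supports them.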
+func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
+	// Set headers from context object on THeaderProtocol
+	if headerProt, ok := oprot.(*THeaderProtocol); ok {
+		headerProt.ClearWriteHeaders()
+		for _, key := range GetWriteHeaderList(ctx) {
+			if value, ok := GetHeader(ctx, key); ok {
+				headerProt.SetWriteHeader(key, value)
+			}
+		}
+	}
+
+	if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
+		return err
+	}
+	if err := args.Write(ctx, oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteMessageEnd(ctx); err != nil {
+		return err
+	}
+	return oprot.Flush(ctx)
+}
+
+func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
+	rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
+	if err != nil {
+		return err
+	}
+
+	if method != rMethod {
+		return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
+	} else if seqId != rSeqId {
+		return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
+	} else if rTypeId == EXCEPTION {
+		var exception tApplicationException
+		if err := exception.Read(ctx, iprot); err != nil {
+			return err
+		}
+
+		if err := iprot.ReadMessageEnd(ctx); err != nil {
+			return err
+		}
+
+		return &exception
+	} else if rTypeId != REPLY {
+		return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
+	}
+
+	if err := result.Read(ctx, iprot); err != nil {
+		return err
+	}
+
+	return iprot.ReadMessageEnd(ctx)
+}
+
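+// Call implements TClient. Each invocation bumps the client-local sequence
+// id; a nil result marks the method as oneway, in which case no response is
+// read back.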
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+	p.seqId++
+	seqId := p.seqId
+
+	if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
+		return ResponseMeta{}, err
+	}
+
+	// method is oneway
+	if result == nil {
+		return ResponseMeta{}, nil
+	}
+
+	err := p.Recv(ctx, p.iprot, seqId, method, result)
+	var headers THeaderMap
+	if hp, ok := p.iprot.(*THeaderProtocol); ok {
+		headers = hp.transport.readHeaders
+	}
+	return ResponseMeta{
+		Headers: headers,
+	}, err
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
new file mode 100644
index 000000000..a49225dab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
@@ -0,0 +1,865 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	COMPACT_PROTOCOL_ID       = 0x082
+	COMPACT_VERSION           = 1
+	COMPACT_VERSION_MASK      = 0x1f
+	COMPACT_TYPE_MASK         = 0x0E0
+	COMPACT_TYPE_BITS         = 0x07
+	COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+	COMPACT_BOOLEAN_TRUE  = 0x01
+	COMPACT_BOOLEAN_FALSE = 0x02
+	COMPACT_BYTE          = 0x03
+	COMPACT_I16           = 0x04
+	COMPACT_I32           = 0x05
+	COMPACT_I64           = 0x06
+	COMPACT_DOUBLE        = 0x07
+	COMPACT_BINARY        = 0x08
+	COMPACT_LIST          = 0x09
+	COMPACT_SET           = 0x0A
+	COMPACT_MAP           = 0x0B
+	COMPACT_STRUCT        = 0x0C
+)
+
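+// ttypeToCompactType maps generic TType ids onto the compact protocol's
+// denser on-wire type codes.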
+var (
+	ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+	ttypeToCompactType = map[TType]tCompactType{
+		STOP:   STOP,
+		BOOL:   COMPACT_BOOLEAN_TRUE,
+		BYTE:   COMPACT_BYTE,
+		I16:    COMPACT_I16,
+		I32:    COMPACT_I32,
+		I64:    COMPACT_I64,
+		DOUBLE: COMPACT_DOUBLE,
+		STRING: COMPACT_BINARY,
+		LIST:   COMPACT_LIST,
+		SET:    COMPACT_SET,
+		MAP:    COMPACT_MAP,
+		STRUCT: COMPACT_STRUCT,
+	}
+}
+
+type TCompactProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTCompactProtocolFactoryConf instead.
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+	return NewTCompactProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
+	return &TCompactProtocolFactory{
+		cfg: conf,
+	}
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTCompactProtocolConf(trans, p.cfg)
+}
+
+func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
+}
+
+type TCompactProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+
+	cfg *TConfiguration
+
+	// Used to keep track of the last field for the current and previous structs,
+	// so we can do the delta stuff.
+	lastField   []int
+	lastFieldId int
+
+	// If we encounter a boolean field begin, save the TField here so it can
+	// have the value incorporated.
+	booleanFieldName    string
+	booleanFieldId      int16
+	booleanFieldPending bool
+
+	// If we read a field header, and it's a boolean field, save the boolean
+	// value here so that readBool can use it.
+	boolValue          bool
+	boolValueIsNotNull bool
+	buffer             [64]byte
+}
+
+// Deprecated: Use NewTCompactProtocolConf instead.
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+	return NewTCompactProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
+	PropagateTConfiguration(trans, conf)
+	p := &TCompactProtocol{
+		origTransport: trans,
+		cfg:           conf,
+	}
+	if et, ok := trans.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(trans)
+	}
+
+	return p
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
+	err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	_, err = p.writeVarint32(seqid)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	return p.WriteString(ctx, name)
+}
+
+func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
+	if len(p.lastField) <= 0 {
+		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
+	}
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	if typeId == BOOL {
+		// we want to possibly include the value, so we'll wait.
+		p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+		return nil
+	}
+	_, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
+	return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
+func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
+	// if there's a type override, use that.
+	var typeToWrite byte
+	if typeOverride == 0xFF {
+		typeToWrite = byte(p.getCompactType(typeId))
+	} else {
+		typeToWrite = typeOverride
+	}
+	// check if we can use delta encoding for the field id
+	fieldId := int(id)
+	written := 0
+	if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+		// write them together
+		err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+	} else {
+		// write them separate
+		err := p.writeByteDirect(typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+		err = p.WriteI16(ctx, id)
+		written = 1 + 2
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	p.lastFieldId = fieldId
+	return written, nil
+}
+
+func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
+	err := p.writeByteDirect(STOP)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	if size == 0 {
+		err := p.writeByteDirect(0)
+		return NewTProtocolException(err)
+	}
+	_, err := p.writeVarint32(int32(size))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
+
+func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
+	v := byte(COMPACT_BOOLEAN_FALSE)
+	if value {
+		v = byte(COMPACT_BOOLEAN_TRUE)
+	}
+	if p.booleanFieldPending {
+		// we haven't written the field header yet
+		_, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
+		p.booleanFieldPending = false
+		return NewTProtocolException(err)
+	}
+	// we're not part of a field, so just write the value.
+	err := p.writeByteDirect(v)
+	return NewTProtocolException(err)
+}
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
+	err := p.writeByteDirect(byte(value))
+	return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+	return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
+	_, err := p.writeVarint64(p.int64ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
+	buf := p.buffer[0:8]
+	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+	_, err := p.trans.Write(buf)
+	return NewTProtocolException(err)
+}
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
+	_, e := p.writeVarint32(int32(len(value)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	_, e = p.trans.WriteString(value)
+	return e
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
+	_, e := p.writeVarint32(int32(len(bin)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	if len(bin) > 0 {
+		_, e = p.trans.Write(bin)
+		return NewTProtocolException(e)
+	}
+	return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	var protocolId byte
+
+	_, deadlineSet := ctx.Deadline()
+	for {
+		protocolId, err = p.readByteDirect()
+		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+			// keep retrying I/O timeout errors since we still have
+			// time left
+			continue
+		}
+		// For anything else, don't retry
+		break
+	}
+	if err != nil {
+		return
+	}
+
+	if protocolId != COMPACT_PROTOCOL_ID {
+		e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+		return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+	}
+
+	versionAndType, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	version := versionAndType & COMPACT_VERSION_MASK
+	typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+	if version != COMPACT_VERSION {
+		e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+		err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+		return
+	}
+	seqId, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	name, err = p.ReadString(ctx)
+	return
+}
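+
+// Header layout recap (per the compact protocol spec): one protocol-id byte
+// (0x82), one byte packing the version (low 5 bits) and the message type
+// (upper 3 bits), a plain varint sequence id, then the method name as a
+// varint-length-prefixed string.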
+
+func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
+	// consume the last field we read off the wire.
+	if len(p.lastField) <= 0 {
+		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
+	}
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
+	t, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	// if it's a stop, then we can return immediately, as the struct is over.
+	if (t & 0x0f) == STOP {
+		return "", STOP, 0, nil
+	}
+
+	// mask off the 4 MSB of the type header. it could contain a field id delta.
+	modifier := int16((t & 0xf0) >> 4)
+	if modifier == 0 {
+		// not a delta. look ahead for the zigzag varint field id.
+		id, err = p.ReadI16(ctx)
+		if err != nil {
+			return
+		}
+	} else {
+		// has a delta. add the delta to the last read field id.
+		id = int16(p.lastFieldId) + modifier
+	}
+	typeId, e := p.getTType(tCompactType(t & 0x0f))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+
+	// if this happens to be a boolean field, the value is encoded in the type
+	if p.isBoolType(t) {
+		// save the boolean value in a special instance variable.
+		p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+		p.boolValueIsNotNull = true
+	}
+
+	// push the new field onto the field stack so we can keep the deltas going.
+	p.lastFieldId = int(id)
+	return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+	size32, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	keyAndValueType := byte(STOP)
+	if size != 0 {
+		keyAndValueType, err = p.readByteDirect()
+		if err != nil {
+			return
+		}
+	}
+	keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+	valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+	return
+}
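+
+// Worked example (illustrative, assuming the standard compact type values
+// BINARY = 0x08 and I32 = 0x05): a map<string,i32> with two entries arrives
+// as the varint size 0x02 followed by a single key/value type byte,
+// (0x08<<4)|0x05 = 0x85.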
+
+func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	size_and_type, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+	size = int((size_and_type >> 4) & 0x0f)
+	if size == 15 {
+		size2, e := p.readVarint32()
+		if e != nil {
+			err = NewTProtocolException(e)
+			return
+		}
+		if size2 < 0 {
+			err = invalidDataLength
+			return
+		}
+		size = int(size2)
+	}
+	elemType, e := p.getTType(tCompactType(size_and_type))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	return
+}
+
+func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.ReadListBegin(ctx)
+}
+
+func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+	if p.boolValueIsNotNull {
+		p.boolValueIsNotNull = false
+		return p.boolValue, nil
+	}
+	v, err := p.readByteDirect()
+	return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.readByteDirect()
+	if err != nil {
+		return 0, NewTProtocolException(err)
+	}
+	return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	v, err := p.ReadI32(ctx)
+	return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	v, e := p.readVarint32()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt32(v)
+	return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	v, e := p.readVarint64()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt64(v)
+	return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	longBits := p.buffer[0:8]
+	_, e := io.ReadFull(p.trans, longBits)
+	if e != nil {
+		return 0.0, NewTProtocolException(e)
+	}
+	return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Read a string off the wire: a varint byte length followed by that many
+// bytes, returned as a Go string.
+func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return "", NewTProtocolException(e)
+	}
+	err = checkSizeForProtocol(length, p.cfg)
+	if err != nil {
+		return
+	}
+	if length == 0 {
+		return "", nil
+	}
+	if length < int32(len(p.buffer)) {
+		// Avoid allocation on small reads
+		buf := p.buffer[:length]
+		read, e := io.ReadFull(p.trans, buf)
+		return string(buf[:read]), NewTProtocolException(e)
+	}
+
+	buf, e := safeReadBytes(length, p.trans)
+	return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return nil, NewTProtocolException(e)
+	}
+	err = checkSizeForProtocol(length, p.cfg)
+	if err != nil {
+		return
+	}
+	if length == 0 {
+		return []byte{}, nil
+	}
+
+	buf, e := safeReadBytes(length, p.trans)
+	return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.trans.Flush(ctx))
+}
+
+func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+	if size <= 14 {
+		return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+	}
+	err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+	if err != nil {
+		return 0, err
+	}
+	m, err := p.writeVarint32(int32(size))
+	return 1 + m, err
+}
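+
+// Worked example (illustrative, assuming the standard compact type value
+// I64 = 0x06): a list of three i64 elements fits the short form, so the
+// header is the single byte (3<<4)|0x06 = 0x36. A list of 20 elements emits
+// 0xf6 followed by the varint size 0x14 instead.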
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+	i32buf := p.buffer[0:5]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			i32buf[idx] = byte(n)
+			idx++
+			break
+		} else {
+			i32buf[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint32(n)
+			n = int32(u >> 7)
+		}
+	}
+	return p.trans.Write(i32buf[0:idx])
+}
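+
+// Varint worked example (illustrative): 300 is 0b1_0010_1100, so it encodes
+// as 0xAC (low 7 bits, continuation bit set) followed by 0x02 (remaining
+// bits). Values below 128 fit in a single byte.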
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+	varint64out := p.buffer[0:10]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			varint64out[idx] = byte(n)
+			idx++
+			break
+		} else {
+			varint64out[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint64(n)
+			n = int64(u >> 7)
+		}
+	}
+	return p.trans.Write(varint64out[0:idx])
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+	return (l << 1) ^ (l >> 63)
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+	return (n << 1) ^ (n >> 31)
+}
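+
+// Zigzag examples (illustrative): 0->0, -1->1, 1->2, -2->3, 2->4. Small
+// magnitudes of either sign stay small as unsigned values and therefore
+// encode into few varint bytes; e.g. int32ToZigzag(-3) == 5, and
+// zigzagToInt32(5) == -3.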
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+	return p.trans.WriteByte(b)
+}
+
+// Writes the low byte of an int, again bypassing any field-header logic.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+	return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+	// if the wire contains the right stuff, this will just truncate the i64 we
+	// read and get us the right sign.
+	v, err := p.readVarint64()
+	return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+	shift := uint(0)
+	result := int64(0)
+	for {
+		b, err := p.readByteDirect()
+		if err != nil {
+			return 0, err
+		}
+		result |= int64(b&0x7f) << shift
+		if (b & 0x80) != 0x80 {
+			break
+		}
+		shift += 7
+	}
+	return result, nil
+}
+
+// Read a raw byte off the wire, unlike ReadByte, which reads a Thrift byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+	return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+	u := uint32(n)
+	return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+	u := uint64(n)
+	return int64(u>>1) ^ -(n & 1)
+}
+
+// bytesToInt64 converts the first 8 bytes of b, little-endian, to an int64.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+	return int64(binary.LittleEndian.Uint64(b))
+}
+
+// bytesToUint64 converts the first 8 bytes of b, little-endian, to a uint64.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+	return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+	return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+	switch byte(t) & 0x0f {
+	case STOP:
+		return STOP, nil
+	case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+		return BOOL, nil
+	case COMPACT_BYTE:
+		return BYTE, nil
+	case COMPACT_I16:
+		return I16, nil
+	case COMPACT_I32:
+		return I32, nil
+	case COMPACT_I64:
+		return I64, nil
+	case COMPACT_DOUBLE:
+		return DOUBLE, nil
+	case COMPACT_BINARY:
+		return STRING, nil
+	case COMPACT_LIST:
+		return LIST, nil
+	case COMPACT_SET:
+		return SET, nil
+	case COMPACT_MAP:
+		return MAP, nil
+	case COMPACT_STRUCT:
+		return STRUCT, nil
+	}
+	return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+	return ttypeToCompactType[t]
+}
+
+func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+	PropagateTConfiguration(p.origTransport, conf)
+	p.cfg = conf
+}
+
+var (
+	_ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
+	_ TConfigurationSetter = (*TCompactProtocol)(nil)
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
new file mode 100644
index 000000000..454d9f377
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+)
+
+// Default TConfiguration values.
+const (
+	DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
+	DEFAULT_MAX_FRAME_SIZE   = 16384000
+
+	DEFAULT_TBINARY_STRICT_READ  = false
+	DEFAULT_TBINARY_STRICT_WRITE = true
+
+	DEFAULT_CONNECT_TIMEOUT = 0
+	DEFAULT_SOCKET_TIMEOUT  = 0
+)
+
+// TConfiguration defines some configurations shared between TTransport,
+// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
+//
+// When constructing TConfiguration, you only need to specify the non-default
+// fields. All zero values have sane default values.
+//
+// Not all configurations defined are applicable to all implementations.
+// Implementations are free to ignore the configurations not applicable to them.
+//
+// All functions attached to this type are nil-safe.
+//
+// See [1] for spec.
+//
+// NOTE: When using TConfiguration, fill in all the configurations you want to
+// set across the stack, not only the ones you want to set in the immediate
+// TTransport/TProtocol.
+//
+// For example, say you want to migrate this old code into using TConfiguration:
+//
+//     socket := thrift.NewTSocketTimeout("host:port", time.Second)
+//     transFactory := thrift.NewTFramedTransportFactoryMaxLength(
+//         thrift.NewTTransportFactory(),
+//         1024 * 1024 * 256,
+//     )
+//     protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
+//
+// This is the wrong way to do it because in the end the TConfiguration used by
+// socket and transFactory will be overwritten by the one used by protoFactory
+// because of TConfiguration propagation:
+//
+//     // bad example, DO NOT USE
+//     socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
+//         ConnectTimeout: time.Second,
+//         SocketTimeout:  time.Second,
+//     })
+//     transFactory := thrift.NewTFramedTransportFactoryConf(
+//         thrift.NewTTransportFactory(),
+//         &thrift.TConfiguration{
+//             MaxFrameSize: 1024 * 1024 * 256,
+//         },
+//     )
+//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
+//         TBinaryStrictRead:  thrift.BoolPtr(true),
+//         TBinaryStrictWrite: thrift.BoolPtr(true),
+//     })
+//
+// This is the correct way to do it:
+//
+//     conf := &thrift.TConfiguration{
+//         ConnectTimeout: time.Second,
+//         SocketTimeout:  time.Second,
+//
+//         MaxFrameSize: 1024 * 1024 * 256,
+//
+//         TBinaryStrictRead:  thrift.BoolPtr(true),
+//         TBinaryStrictWrite: thrift.BoolPtr(true),
+//     }
+//     socket := thrift.NewTSocketConf("host:port", conf)
+//     transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
+//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
+//
+// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
+type TConfiguration struct {
+	// If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
+	MaxMessageSize int32
+
+	// If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
+	//
+	// Also if MaxMessageSize < MaxFrameSize,
+	// MaxMessageSize will be used instead.
+	MaxFrameSize int32
+
+	// Connect and socket timeouts to be used by TSocket and TSSLSocket.
+	//
+	// 0 means no timeout.
+	//
+	// If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
+	// used.
+	ConnectTimeout time.Duration
+	SocketTimeout  time.Duration
+
+	// TLS config to be used by TSSLSocket.
+	TLSConfig *tls.Config
+
+	// Strict read/write configurations for TBinaryProtocol.
+	//
+	// The BoolPtr helper function is available for setting these from literal values.
+	TBinaryStrictRead  *bool
+	TBinaryStrictWrite *bool
+
+	// The wrapped protocol id to be used in THeader transport/protocol.
+	//
+	// THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
+	// are provided to help fill in this value.
+	THeaderProtocolID *THeaderProtocolID
+
+	// Used internally by deprecated constructors, to avoid overriding
+	// underlying TTransport/TProtocol's cfg by accidental propagations.
+	//
+	// For external users this is always false.
+	noPropagation bool
+}
+
+// GetMaxMessageSize returns the max message size an implementation should
+// follow.
+//
+// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
+func (tc *TConfiguration) GetMaxMessageSize() int32 {
+	if tc == nil || tc.MaxMessageSize <= 0 {
+		return DEFAULT_MAX_MESSAGE_SIZE
+	}
+	return tc.MaxMessageSize
+}
+
+// GetMaxFrameSize returns the max frame size an implementation should follow.
+//
+// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
+//
+// If the configured max message size is smaller than the configured max frame
+// size, the smaller one will be returned instead.
+func (tc *TConfiguration) GetMaxFrameSize() int32 {
+	if tc == nil {
+		return DEFAULT_MAX_FRAME_SIZE
+	}
+	maxFrameSize := tc.MaxFrameSize
+	if maxFrameSize <= 0 {
+		maxFrameSize = DEFAULT_MAX_FRAME_SIZE
+	}
+	if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
+		return maxMessageSize
+	}
+	return maxFrameSize
+}
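+
+// For example (illustrative): with MaxFrameSize = 64 MiB but MaxMessageSize =
+// 1 MiB, GetMaxFrameSize returns 1 MiB, since a frame can never usefully be
+// larger than the maximum message size.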
+
+// GetConnectTimeout returns the connect timeout that should be used by
+// TSocket and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetConnectTimeout() time.Duration {
+	if tc == nil || tc.ConnectTimeout < 0 {
+		return DEFAULT_CONNECT_TIMEOUT
+	}
+	return tc.ConnectTimeout
+}
+
+// GetSocketTimeout returns the socket timeout that should be used by
+// TSocket and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetSocketTimeout() time.Duration {
+	if tc == nil || tc.SocketTimeout < 0 {
+		return DEFAULT_SOCKET_TIMEOUT
+	}
+	return tc.SocketTimeout
+}
+
+// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
+//
+// It's nil-safe. If tc is nil, nil will be returned instead.
+func (tc *TConfiguration) GetTLSConfig() *tls.Config {
+	if tc == nil {
+		return nil
+	}
+	return tc.TLSConfig
+}
+
+// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
+// tc.TBinaryStrictRead is nil.
+func (tc *TConfiguration) GetTBinaryStrictRead() bool {
+	if tc == nil || tc.TBinaryStrictRead == nil {
+		return DEFAULT_TBINARY_STRICT_READ
+	}
+	return *tc.TBinaryStrictRead
+}
+
+// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
+// tc.TBinaryStrictWrite is nil.
+func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
+	if tc == nil || tc.TBinaryStrictWrite == nil {
+		return DEFAULT_TBINARY_STRICT_WRITE
+	}
+	return *tc.TBinaryStrictWrite
+}
+
+// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
+// THeaderProtocol clients (for servers, they always use the same one as the
+// client instead).
+//
+// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
+// THeaderProtocolDefault will be returned instead.
+// THeaderProtocolDefault will also be returned if the configured value is invalid.
+func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
+	if tc == nil || tc.THeaderProtocolID == nil {
+		return THeaderProtocolDefault
+	}
+	protoID := *tc.THeaderProtocolID
+	if err := protoID.Validate(); err != nil {
+		return THeaderProtocolDefault
+	}
+	return protoID
+}
+
+// THeaderProtocolIDPtr validates and returns the pointer to id.
+//
+// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
+// and the validation error will be returned.
+func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
+	err := id.Validate()
+	if err != nil {
+		id = THeaderProtocolDefault
+	}
+	return &id, err
+}
+
+// THeaderProtocolIDPtrMust validates and returns the pointer to id.
+//
+// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
+// instead of returning them.
+func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
+	ptr, err := THeaderProtocolIDPtr(id)
+	if err != nil {
+		panic(err)
+	}
+	return ptr
+}
+
+// TConfigurationSetter is an optional interface TProtocol, TTransport,
+// TProtocolFactory, TTransportFactory, and other implementations can implement.
+//
+// It's intended to be called during initializations.
+// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
+// middle of a message is undefined:
+// It may or may not change the behavior of the current processing message,
+// and it may even cause the current message to fail.
+//
+// Note for implementations: SetTConfiguration might be called multiple times
+// with the same value in quick succession due to the implementation of the
+// propagation. Implementations should make SetTConfiguration as simple as
+// possible (usually just overwrite the stored configuration and propagate it to
+// the wrapped TTransports/TProtocols).
+type TConfigurationSetter interface {
+	SetTConfiguration(*TConfiguration)
+}
+
+// PropagateTConfiguration propagates cfg to impl if impl implements
+// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
+//
+// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
+// with everything being default value, use &TConfiguration{} explicitly instead.
+func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
+	if cfg == nil || cfg.noPropagation {
+		return
+	}
+
+	if setter, ok := impl.(TConfigurationSetter); ok {
+		setter.SetTConfiguration(cfg)
+	}
+}
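+
+// Hypothetical usage sketch (the proto and trans names are invented for
+// illustration):
+//
+//	conf := &TConfiguration{MaxMessageSize: 1024 * 1024}
+//	PropagateTConfiguration(proto, conf) // applied only if proto implements TConfigurationSetter
+//	PropagateTConfiguration(trans, nil)  // no-op: nil configs are never propagated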
+
+func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
+	if size < 0 {
+		return NewTProtocolExceptionWithType(
+			NEGATIVE_SIZE,
+			fmt.Errorf("negative size: %d", size),
+		)
+	}
+	if size > cfg.GetMaxMessageSize() {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			fmt.Errorf("size exceeded max allowed: %d", size),
+		)
+	}
+	return nil
+}
+
+type tTransportFactoryConf struct {
+	delegate TTransportFactory
+	cfg      *TConfiguration
+}
+
+func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
+	trans, err := f.delegate.GetTransport(orig)
+	if err == nil {
+		PropagateTConfiguration(orig, f.cfg)
+		PropagateTConfiguration(trans, f.cfg)
+	}
+	return trans, err
+}
+
+func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.delegate, cfg)
+	f.cfg = cfg
+}
+
+// TTransportFactoryConf wraps a TTransportFactory to propagate
+// TConfiguration on the factory's GetTransport calls.
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
+	return &tTransportFactoryConf{
+		delegate: delegate,
+		cfg:      conf,
+	}
+}
+
+type tProtocolFactoryConf struct {
+	delegate TProtocolFactory
+	cfg      *TConfiguration
+}
+
+func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
+	proto := f.delegate.GetProtocol(trans)
+	PropagateTConfiguration(trans, f.cfg)
+	PropagateTConfiguration(proto, f.cfg)
+	return proto
+}
+
+func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.delegate, cfg)
+	f.cfg = cfg
+}
+
+// TProtocolFactoryConf wraps a TProtocolFactory to propagate
+// TConfiguration on the factory's GetProtocol calls.
+func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
+	return &tProtocolFactoryConf{
+		delegate: delegate,
+		cfg:      conf,
+	}
+}
+
+var (
+	_ TConfigurationSetter = (*tTransportFactoryConf)(nil)
+	_ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
new file mode 100644
index 000000000..d15c1bcf8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+var defaultCtx = context.Background()
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
new file mode 100644
index 000000000..fdf9bfec1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
@@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"fmt"
+)
+
+type TDebugProtocol struct {
+	// Required. The actual TProtocol to do the read/write.
+	Delegate TProtocol
+
+	// Optional. The logger and prefix to log all the args/return values
+	// from Delegate TProtocol calls.
+	//
+	// If Logger is nil, StdLogger using stdlib log package with os.Stderr
+	// will be used. If disable logging is desired, set Logger to NopLogger
+	// explicitly instead of leaving it as nil/unset.
+	Logger    Logger
+	LogPrefix string
+
+	// Optional. A TProtocol to duplicate everything read/written from Delegate.
+	//
+	// A typical use case is to wrap a TMemoryBuffer in a TSimpleJSONProtocol
+	// inside a middleware, to JSON-log requests/responses.
+	//
+	// This feature is not available from TDebugProtocolFactory. In order to
+	// use it you have to construct TDebugProtocol directly, or set DuplicateTo
+	// field after getting a TDebugProtocol from the factory.
+	DuplicateTo TProtocol
+}
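+
+// Hypothetical sketch of the DuplicateTo use case described above
+// (actualProtocol is invented for illustration):
+//
+//	buf := NewTMemoryBuffer()
+//	debug := &TDebugProtocol{
+//		Delegate:    actualProtocol,
+//		Logger:      NopLogger,
+//		DuplicateTo: NewTSimpleJSONProtocol(buf), // buf captures a JSON copy of the traffic
+//	}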
+
+type TDebugProtocolFactory struct {
+	Underlying TProtocolFactory
+	LogPrefix  string
+	Logger     Logger
+}
+
+// NewTDebugProtocolFactory creates a TDebugProtocolFactory.
+//
+// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct
+// itself instead. This version will use the default logger from standard
+// library.
+func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
+	return &TDebugProtocolFactory{
+		Underlying: underlying,
+		LogPrefix:  logPrefix,
+		Logger:     StdLogger(nil),
+	}
+}
+
+// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory.
+func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory {
+	return &TDebugProtocolFactory{
+		Underlying: underlying,
+		LogPrefix:  logPrefix,
+		Logger:     logger,
+	}
+}
+
+func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return &TDebugProtocol{
+		Delegate:  t.Underlying.GetProtocol(trans),
+		LogPrefix: t.LogPrefix,
+		Logger:    fallbackLogger(t.Logger),
+	}
+}
+
+func (tdp *TDebugProtocol) logf(format string, v ...interface{}) {
+	fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...))
+}
+
+func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
+	err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid)
+	tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error {
+	err := tdp.Delegate.WriteMessageEnd(ctx)
+	tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMessageEnd(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	err := tdp.Delegate.WriteStructBegin(ctx, name)
+	tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteStructBegin(ctx, name)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error {
+	err := tdp.Delegate.WriteStructEnd(ctx)
+	tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteStructEnd(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id)
+	tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error {
+	err := tdp.Delegate.WriteFieldEnd(ctx)
+	tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteFieldEnd(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error {
+	err := tdp.Delegate.WriteFieldStop(ctx)
+	tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteFieldStop(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size)
+	tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error {
+	err := tdp.Delegate.WriteMapEnd(ctx)
+	tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMapEnd(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	err := tdp.Delegate.WriteListBegin(ctx, elemType, size)
+	tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error {
+	err := tdp.Delegate.WriteListEnd(ctx)
+	tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteListEnd(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	err := tdp.Delegate.WriteSetBegin(ctx, elemType, size)
+	tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error {
+	err := tdp.Delegate.WriteSetEnd(ctx)
+	tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteSetEnd(ctx)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error {
+	err := tdp.Delegate.WriteBool(ctx, value)
+	tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteBool(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error {
+	err := tdp.Delegate.WriteByte(ctx, value)
+	tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteByte(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error {
+	err := tdp.Delegate.WriteI16(ctx, value)
+	tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteI16(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error {
+	err := tdp.Delegate.WriteI32(ctx, value)
+	tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteI32(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error {
+	err := tdp.Delegate.WriteI64(ctx, value)
+	tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteI64(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error {
+	err := tdp.Delegate.WriteDouble(ctx, value)
+	tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteDouble(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error {
+	err := tdp.Delegate.WriteString(ctx, value)
+	tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteString(ctx, value)
+	}
+	return err
+}
+func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	err := tdp.Delegate.WriteBinary(ctx, value)
+	tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteBinary(ctx, value)
+	}
+	return err
+}
+
+func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
+	name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx)
+	tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) {
+	err = tdp.Delegate.ReadMessageEnd(ctx)
+	tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMessageEnd(ctx)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	name, err = tdp.Delegate.ReadStructBegin(ctx)
+	tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteStructBegin(ctx, name)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) {
+	err = tdp.Delegate.ReadStructEnd(ctx)
+	tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteStructEnd(ctx)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
+	name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx)
+	tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) {
+	err = tdp.Delegate.ReadFieldEnd(ctx)
+	tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteFieldEnd(ctx)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+	keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx)
+	tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) {
+	err = tdp.Delegate.ReadMapEnd(ctx)
+	tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteMapEnd(ctx)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	elemType, size, err = tdp.Delegate.ReadListBegin(ctx)
+	tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) {
+	err = tdp.Delegate.ReadListEnd(ctx)
+	tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteListEnd(ctx)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	elemType, size, err = tdp.Delegate.ReadSetBegin(ctx)
+	tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) {
+	err = tdp.Delegate.ReadSetEnd(ctx)
+	tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteSetEnd(ctx)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+	value, err = tdp.Delegate.ReadBool(ctx)
+	tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteBool(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) {
+	value, err = tdp.Delegate.ReadByte(ctx)
+	tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteByte(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	value, err = tdp.Delegate.ReadI16(ctx)
+	tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteI16(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	value, err = tdp.Delegate.ReadI32(ctx)
+	tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteI32(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	value, err = tdp.Delegate.ReadI64(ctx)
+	tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteI64(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	value, err = tdp.Delegate.ReadDouble(ctx)
+	tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteDouble(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) {
+	value, err = tdp.Delegate.ReadString(ctx)
+	tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteString(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+	value, err = tdp.Delegate.ReadBinary(ctx)
+	tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.WriteBinary(ctx, value)
+	}
+	return
+}
+func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	err = tdp.Delegate.Skip(ctx, fieldType)
+	tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.Skip(ctx, fieldType)
+	}
+	return
+}
+func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
+	err = tdp.Delegate.Flush(ctx)
+	tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
+	if tdp.DuplicateTo != nil {
+		tdp.DuplicateTo.Flush(ctx)
+	}
+	return
+}
+
+func (tdp *TDebugProtocol) Transport() TTransport {
+	return tdp.Delegate.Transport()
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(tdp.Delegate, conf)
+	PropagateTConfiguration(tdp.DuplicateTo, conf)
+}
+
+var _ TConfigurationSetter = (*TDebugProtocol)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
new file mode 100644
index 000000000..cefc7ecda
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"sync"
+)
+
+type TDeserializer struct {
+	Transport *TMemoryBuffer
+	Protocol  TProtocol
+}
+
+func NewTDeserializer() *TDeserializer {
+	transport := NewTMemoryBufferLen(1024)
+	protocol := NewTBinaryProtocolTransport(transport)
+
+	return &TDeserializer{
+		Transport: transport,
+		Protocol:  protocol,
+	}
+}
+
+func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) {
+	t.Transport.Reset()
+
+	if _, err = t.Transport.Write([]byte(s)); err != nil {
+		return
+	}
+	if err = msg.Read(ctx, t.Protocol); err != nil {
+		return
+	}
+	return
+}
+
+func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) {
+	t.Transport.Reset()
+
+	if _, err = t.Transport.Write(b); err != nil {
+		return
+	}
+	if err = msg.Read(ctx, t.Protocol); err != nil {
+		return
+	}
+	return
+}
+
+// TDeserializerPool is the thread-safe version of TDeserializer;
+// it uses a resource pool of TDeserializers under the hood.
+//
+// It must be initialized with either NewTDeserializerPool or
+// NewTDeserializerPoolSizeFactory.
+type TDeserializerPool struct {
+	pool sync.Pool
+}
+
+// NewTDeserializerPool creates a new TDeserializerPool.
+//
+// NewTDeserializer can be used as the arg here.
+func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool {
+	return &TDeserializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return f()
+			},
+		},
+	}
+}
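+
+// Hypothetical usage sketch (MyThriftStruct stands in for any generated
+// TStruct):
+//
+//	pool := NewTDeserializerPool(NewTDeserializer)
+//	var msg MyThriftStruct
+//	err := pool.Read(context.Background(), &msg, payload)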
+
+// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with
+// the given size and protocol factory.
+//
+// Note that the size is not the limit. The TMemoryBuffer underneath can grow
+// larger than that. It just dictates the initial size.
+func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool {
+	return &TDeserializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				transport := NewTMemoryBufferLen(size)
+				protocol := factory.GetProtocol(transport)
+
+				return &TDeserializer{
+					Transport: transport,
+					Protocol:  protocol,
+				}
+			},
+		},
+	}
+}
+
+func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error {
+	d := t.pool.Get().(*TDeserializer)
+	defer t.pool.Put(d)
+	return d.ReadString(ctx, msg, s)
+}
+
+func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error {
+	d := t.pool.Get().(*TDeserializer)
+	defer t.pool.Put(d)
+	return d.Read(ctx, msg, b)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
new file mode 100644
index 000000000..53bf862ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+)
+
+// Generic Thrift exception
+type TException interface {
+	error
+
+	TExceptionType() TExceptionType
+}
+
+// Prepends additional information to an error without losing the Thrift exception interface
+func PrependError(prepend string, err error) error {
+	msg := prepend + err.Error()
+
+	var te TException
+	if errors.As(err, &te) {
+		switch te.TExceptionType() {
+		case TExceptionTypeTransport:
+			if t, ok := err.(TTransportException); ok {
+				return prependTTransportException(prepend, t)
+			}
+		case TExceptionTypeProtocol:
+			if t, ok := err.(TProtocolException); ok {
+				return prependTProtocolException(prepend, t)
+			}
+		case TExceptionTypeApplication:
+			var t TApplicationException
+			if errors.As(err, &t) {
+				return NewTApplicationException(t.TypeId(), msg)
+			}
+		}
+
+		return wrappedTException{
+			err:            err,
+			msg:            msg,
+			tExceptionType: te.TExceptionType(),
+		}
+	}
+
+	return errors.New(msg)
+}
+
+// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
+type TExceptionType byte
+
+// TExceptionType values
+const (
+	TExceptionTypeUnknown     TExceptionType = iota
+	TExceptionTypeCompiled                   // TExceptions defined in thrift files and generated by thrift compiler
+	TExceptionTypeApplication                // TApplicationExceptions
+	TExceptionTypeProtocol                   // TProtocolExceptions
+	TExceptionTypeTransport                  // TTransportExceptions
+)
+
+// WrapTException wraps an error into TException.
+//
+// If err is nil or already TException, it's returned as-is.
+// Otherwise it will be wrapped into TException with TExceptionType() returning
+// TExceptionTypeUnknown, and Unwrap() returning the original error.
+func WrapTException(err error) TException {
+	if err == nil {
+		return nil
+	}
+
+	if te, ok := err.(TException); ok {
+		return te
+	}
+
+	return wrappedTException{
+		err:            err,
+		msg:            err.Error(),
+		tExceptionType: TExceptionTypeUnknown,
+	}
+}
+
+type wrappedTException struct {
+	err            error
+	msg            string
+	tExceptionType TExceptionType
+}
+
+func (w wrappedTException) Error() string {
+	return w.msg
+}
+
+func (w wrappedTException) TExceptionType() TExceptionType {
+	return w.tExceptionType
+}
+
+func (w wrappedTException) Unwrap() error {
+	return w.err
+}
+
+var _ TException = wrappedTException{}
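+
+// Hypothetical usage sketch: wrapping a plain error keeps it unwrappable.
+//
+//	te := WrapTException(io.ErrUnexpectedEOF)
+//	_ = te.TExceptionType()                // TExceptionTypeUnknown
+//	_ = errors.Is(te, io.ErrUnexpectedEOF) // true, via Unwrap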
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
new file mode 100644
index 000000000..f683e7f54
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead.
+const DEFAULT_MAX_LENGTH = 16384000
+
+type TFramedTransport struct {
+	transport TTransport
+
+	cfg *TConfiguration
+
+	writeBuf bytes.Buffer
+
+	reader  *bufio.Reader
+	readBuf bytes.Buffer
+
+	buffer [4]byte
+}
+
+type tFramedTransportFactory struct {
+	factory TTransportFactory
+	cfg     *TConfiguration
+}
+
+// Deprecated: Use NewTFramedTransportFactoryConf instead.
+func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
+	return NewTFramedTransportFactoryConf(factory, &TConfiguration{
+		MaxFrameSize: DEFAULT_MAX_LENGTH,
+
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTFramedTransportFactoryConf instead.
+func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
+	return NewTFramedTransportFactoryConf(factory, &TConfiguration{
+		MaxFrameSize: int32(maxLength),
+
+		noPropagation: true,
+	})
+}
+
+func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
+	PropagateTConfiguration(factory, conf)
+	return &tFramedTransportFactory{
+		factory: factory,
+		cfg:     conf,
+	}
+}
+
+func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
+	PropagateTConfiguration(base, p.cfg)
+	tt, err := p.factory.GetTransport(base)
+	if err != nil {
+		return nil, err
+	}
+	return NewTFramedTransportConf(tt, p.cfg), nil
+}
+
+func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(p.factory, cfg)
+	p.cfg = cfg
+}
+
+// Deprecated: Use NewTFramedTransportConf instead.
+func NewTFramedTransport(transport TTransport) *TFramedTransport {
+	return NewTFramedTransportConf(transport, &TConfiguration{
+		MaxFrameSize: DEFAULT_MAX_LENGTH,
+
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTFramedTransportConf instead.
+func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
+	return NewTFramedTransportConf(transport, &TConfiguration{
+		MaxFrameSize: int32(maxLength),
+
+		noPropagation: true,
+	})
+}
+
+func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport {
+	PropagateTConfiguration(transport, conf)
+	return &TFramedTransport{
+		transport: transport,
+		reader:    bufio.NewReader(transport),
+		cfg:       conf,
+	}
+}
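+
+// A minimal construction sketch (not part of the upstream file; "sock" is
+// any pre-existing TTransport):
+//
+//	cfg := &TConfiguration{MaxFrameSize: 1 << 20} // cap frames at 1 MiB
+//	framed := NewTFramedTransportConf(sock, cfg)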
+
+func (p *TFramedTransport) Open() error {
+	return p.transport.Open()
+}
+
+func (p *TFramedTransport) IsOpen() bool {
+	return p.transport.IsOpen()
+}
+
+func (p *TFramedTransport) Close() error {
+	return p.transport.Close()
+}
+
+func (p *TFramedTransport) Read(buf []byte) (read int, err error) {
+	read, err = p.readBuf.Read(buf)
+	if err != io.EOF {
+		return
+	}
+
+	// For bytes.Buffer.Read, EOF should only happen when read is zero,
+	// but do a sanity check anyway,
+	// in case that behavior changes in a future version of the go stdlib.
+	// If it does, just return a nil error,
+	// and let the caller call Read again to read the next frame.
+	if read > 0 {
+		return read, nil
+	}
+
+	// Reaching here means that the last Read finished the last frame,
+	// so we need to read the next frame into readBuf now.
+	if err = p.readFrame(); err != nil {
+		return read, err
+	}
+	newRead, err := p.Read(buf[read:])
+	return read + newRead, err
+}
+
+func (p *TFramedTransport) ReadByte() (c byte, err error) {
+	buf := p.buffer[:1]
+	_, err = p.Read(buf)
+	if err != nil {
+		return
+	}
+	c = buf[0]
+	return
+}
+
+func (p *TFramedTransport) Write(buf []byte) (int, error) {
+	n, err := p.writeBuf.Write(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TFramedTransport) WriteByte(c byte) error {
+	return p.writeBuf.WriteByte(c)
+}
+
+func (p *TFramedTransport) WriteString(s string) (n int, err error) {
+	return p.writeBuf.WriteString(s)
+}
+
+func (p *TFramedTransport) Flush(ctx context.Context) error {
+	size := p.writeBuf.Len()
+	buf := p.buffer[:4]
+	binary.BigEndian.PutUint32(buf, uint32(size))
+	_, err := p.transport.Write(buf)
+	if err != nil {
+		p.writeBuf.Reset()
+		return NewTTransportExceptionFromError(err)
+	}
+	if size > 0 {
+		if _, err := io.Copy(p.transport, &p.writeBuf); err != nil {
+			p.writeBuf.Reset()
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+	err = p.transport.Flush(ctx)
+	return NewTTransportExceptionFromError(err)
+}
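+
+// As assembled above, a flushed frame is laid out on the wire as a 4-byte
+// big-endian length followed by exactly that many payload bytes:
+//
+//	[4-byte BE frame size][frame payload]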
+
+func (p *TFramedTransport) readFrame() error {
+	buf := p.buffer[:4]
+	if _, err := io.ReadFull(p.reader, buf); err != nil {
+		return err
+	}
+	size := binary.BigEndian.Uint32(buf)
+	if size > uint32(p.cfg.GetMaxFrameSize()) {
+		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
+	}
+	_, err := io.CopyN(&p.readBuf, p.reader, int64(size))
+	return NewTTransportExceptionFromError(err)
+}
+
+func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
+	return uint64(p.readBuf.Len())
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(p.transport, cfg)
+	p.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*tFramedTransportFactory)(nil)
+	_ TConfigurationSetter = (*TFramedTransport)(nil)
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
new file mode 100644
index 000000000..ac9bd4882
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue for why we need the unexported typedefs.
+type (
+	headerKey     string
+	headerKeyList int
+)
+
+// Values for headerKeyList.
+const (
+	headerKeyListRead headerKeyList = iota
+	headerKeyListWrite
+)
+
+// SetHeader sets a header in the context.
+func SetHeader(ctx context.Context, key, value string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		value,
+	)
+}
+
+// UnsetHeader unsets a previously set header in the context.
+func UnsetHeader(ctx context.Context, key string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		nil,
+	)
+}
+
+// GetHeader returns a value of the given header from the context.
+func GetHeader(ctx context.Context, key string) (value string, ok bool) {
+	if v := ctx.Value(headerKey(key)); v != nil {
+		value, ok = v.(string)
+	}
+	return
+}
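+
+// A minimal round-trip sketch (not part of the upstream file; the key and
+// value are hypothetical):
+//
+//	ctx := SetHeader(context.Background(), "request-id", "1234")
+//	v, ok := GetHeader(ctx, "request-id") // "1234", true
+//	ctx = UnsetHeader(ctx, "request-id")
+//	_, ok = GetHeader(ctx, "request-id")  // false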
+
+// SetReadHeaderList sets the key list of read THeaders in the context.
+func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListRead,
+		keys,
+	)
+}
+
+// GetReadHeaderList returns the key list of read THeaders from the context.
+func GetReadHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListRead); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// SetWriteHeaderList sets the key list of THeaders to write in the context.
+func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListWrite,
+		keys,
+	)
+}
+
+// GetWriteHeaderList returns the key list of THeaders to write from the context.
+func GetWriteHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListWrite); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// AddReadTHeaderToContext adds the whole THeader headers into context.
+func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
+	keys := make([]string, 0, len(headers))
+	for key, value := range headers {
+		ctx = SetHeader(ctx, key, value)
+		keys = append(keys, key)
+	}
+	return SetReadHeaderList(ctx, keys)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
new file mode 100644
index 000000000..878041f8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+)
+
+// THeaderProtocol is a thrift protocol that implements THeader:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+//
+// It supports either binary or compact protocol as the wrapped protocol.
+//
+// Most of the THeader handling happens inside THeaderTransport.
+type THeaderProtocol struct {
+	transport *THeaderTransport
+
+	// Will be initialized on first read/write.
+	protocol TProtocol
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderProtocolConf instead.
+func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
+// transport with the given TConfiguration.
+//
+// The passed-in transport will be wrapped with THeaderTransport.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// not a rich transport like TZlibTransport or TFramedTransport.
+func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, conf)
+}
+
+func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
+	t := NewTHeaderTransportConf(trans, cfg)
+	p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
+	PropagateTConfiguration(p, cfg)
+	return &THeaderProtocol{
+		transport: t,
+		protocol:  p,
+		cfg:       cfg,
+	}
+}
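+
+// A minimal construction sketch (not part of the upstream file; "sock" is a
+// raw socket transport such as a TSocket):
+//
+//	proto := NewTHeaderProtocolConf(sock, &TConfiguration{})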
+
+type tHeaderProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return newTHeaderProtocolConf(trans, f.cfg)
+}
+
+func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
+	f.cfg = cfg
+}
+
+// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
+func NewTHeaderProtocolFactory() TProtocolFactory {
+	return NewTHeaderProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
+// TConfiguration.
+func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
+	return tHeaderProtocolFactory{
+		cfg: conf,
+	}
+}
+
+// Transport returns the underlying transport.
+//
+// It's guaranteed to be of type *THeaderTransport.
+func (p *THeaderProtocol) Transport() TTransport {
+	return p.transport
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
+	return p.transport.GetReadHeaders()
+}
+
+// SetWriteHeader sets a header for write.
+func (p *THeaderProtocol) SetWriteHeader(key, value string) {
+	p.transport.SetWriteHeader(key, value)
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (p *THeaderProtocol) ClearWriteHeaders() {
+	p.transport.ClearWriteHeaders()
+}
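+
+// A minimal usage sketch (not part of the upstream file; p is a
+// *THeaderProtocol): headers set before a write are sent with the next
+// flushed message, and headers from the last read message can be fetched
+// afterwards.
+//
+//	p.SetWriteHeader("request-id", "1234")
+//	// ... write a message and flush ...
+//	got := p.GetReadHeaders() // THeaderMap from the last frame read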
+
+// AddTransform adds a transform for writing.
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
+	return p.transport.AddTransform(transform)
+}
+
+func (p *THeaderProtocol) Flush(ctx context.Context) error {
+	return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
+	newProto, err := p.transport.Protocol().GetProtocol(p.transport)
+	if err != nil {
+		return err
+	}
+	PropagateTConfiguration(newProto, p.cfg)
+	p.protocol = newProto
+	p.transport.SequenceID = seqID
+	return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
+}
+
+func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
+	if err := p.protocol.WriteMessageEnd(ctx); err != nil {
+		return err
+	}
+	return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	return p.protocol.WriteStructBegin(ctx, name)
+}
+
+func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
+	return p.protocol.WriteStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
+	return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
+}
+
+func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
+	return p.protocol.WriteFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
+	return p.protocol.WriteFieldStop(ctx)
+}
+
+func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
+}
+
+func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
+	return p.protocol.WriteMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	return p.protocol.WriteListBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
+	return p.protocol.WriteListEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	return p.protocol.WriteSetBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
+	return p.protocol.WriteSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
+	return p.protocol.WriteBool(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
+	return p.protocol.WriteByte(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
+	return p.protocol.WriteI16(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
+	return p.protocol.WriteI32(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
+	return p.protocol.WriteI64(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
+	return p.protocol.WriteDouble(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
+	return p.protocol.WriteString(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	return p.protocol.WriteBinary(ctx, value)
+}
+
+// ReadFrame calls the underlying THeaderTransport's ReadFrame function.
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
+	return p.transport.ReadFrame(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
+	if err = p.transport.ReadFrame(ctx); err != nil {
+		return
+	}
+
+	var newProto TProtocol
+	newProto, err = p.transport.Protocol().GetProtocol(p.transport)
+	if err != nil {
+		var tAppExc TApplicationException
+		if !errors.As(err, &tAppExc) {
+			return
+		}
+		if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
+			return
+		}
+		if e := tAppExc.Write(ctx, p.protocol); e != nil {
+			return
+		}
+		if e := p.protocol.WriteMessageEnd(ctx); e != nil {
+			return
+		}
+		if e := p.transport.Flush(ctx); e != nil {
+			return
+		}
+		return
+	}
+	PropagateTConfiguration(newProto, p.cfg)
+	p.protocol = newProto
+
+	return p.protocol.ReadMessageBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
+	return p.protocol.ReadMessageEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	return p.protocol.ReadStructBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
+	return p.protocol.ReadStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
+	return p.protocol.ReadFieldBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
+	return p.protocol.ReadFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+	return p.protocol.ReadMapBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
+	return p.protocol.ReadMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.protocol.ReadListBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
+	return p.protocol.ReadListEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.protocol.ReadSetBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
+	return p.protocol.ReadSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+	return p.protocol.ReadBool(ctx)
+}
+
+func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
+	return p.protocol.ReadByte(ctx)
+}
+
+func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	return p.protocol.ReadI16(ctx)
+}
+
+func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	return p.protocol.ReadI32(ctx)
+}
+
+func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	return p.protocol.ReadI64(ctx)
+}
+
+func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	return p.protocol.ReadDouble(ctx)
+}
+
+func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
+	return p.protocol.ReadString(ctx)
+}
+
+func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+	return p.protocol.ReadBinary(ctx)
+}
+
+func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
+	return p.protocol.Skip(ctx, fieldType)
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(p.transport, cfg)
+	PropagateTConfiguration(p.protocol, cfg)
+	p.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
+	_ TConfigurationSetter = (*THeaderProtocol)(nil)
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
new file mode 100644
index 000000000..6a99535a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
@@ -0,0 +1,809 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"compress/zlib"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// Size in bytes for 32-bit ints.
+const size32 = 4
+
+type headerMeta struct {
+	MagicFlags   uint32
+	SequenceID   int32
+	HeaderLength uint16
+}
+
+const headerMetaSize = 10
+
+type clientType int
+
+const (
+	clientUnknown clientType = iota
+	clientHeaders
+	clientFramedBinary
+	clientUnframedBinary
+	clientFramedCompact
+	clientUnframedCompact
+)
+
+// Constants defined in THeader format:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+const (
+	THeaderHeaderMagic  uint32 = 0x0fff0000
+	THeaderHeaderMask   uint32 = 0xffff0000
+	THeaderFlagsMask    uint32 = 0x0000ffff
+	THeaderMaxFrameSize uint32 = 0x3fffffff
+)
+
+// THeaderMap is the type of the header map in THeader transport.
+type THeaderMap map[string]string
+
+// THeaderProtocolID is the wrapped protocol id used in THeader.
+type THeaderProtocolID int32
+
+// Supported THeaderProtocolID values.
+const (
+	THeaderProtocolBinary  THeaderProtocolID = 0x00
+	THeaderProtocolCompact THeaderProtocolID = 0x02
+	THeaderProtocolDefault                   = THeaderProtocolBinary
+)
+
+// Declared globally to avoid repetitive allocations; only used for validation, never for actual I/O.
+var globalMemoryBuffer = NewTMemoryBuffer()
+
+// Validate checks whether the THeaderProtocolID is a valid/supported one.
+func (id THeaderProtocolID) Validate() error {
+	_, err := id.GetProtocol(globalMemoryBuffer)
+	return err
+}
+
+// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
+func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
+	switch id {
+	default:
+		return nil, NewTApplicationException(
+			INVALID_PROTOCOL,
+			fmt.Sprintf("THeader protocol id %d not supported", id),
+		)
+	case THeaderProtocolBinary:
+		return NewTBinaryProtocolTransport(trans), nil
+	case THeaderProtocolCompact:
+		return NewTCompactProtocol(trans), nil
+	}
+}
+
+// THeaderTransformID defines the numeric id of the transform used.
+type THeaderTransformID int32
+
+// THeaderTransformID values.
+//
+// Values not defined here are not currently supported, namely HMAC and Snappy.
+const (
+	TransformNone THeaderTransformID = iota // 0, no special handling
+	TransformZlib                           // 1, zlib
+)
+
+var supportedTransformIDs = map[THeaderTransformID]bool{
+	TransformNone: true,
+	TransformZlib: true,
+}
+
+// TransformReader is an io.ReadCloser that handles transforms reading.
+type TransformReader struct {
+	io.Reader
+
+	closers []io.Closer
+}
+
+var _ io.ReadCloser = (*TransformReader)(nil)
+
+// NewTransformReaderWithCapacity initializes a TransformReader with the
+// expected closers capacity.
+//
+// If you don't know the closers capacity beforehand, using
+//
+//     &TransformReader{Reader: baseReader}
+//
+// directly would be sufficient.
+func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
+	return &TransformReader{
+		Reader:  baseReader,
+		closers: make([]io.Closer, 0, capacity),
+	}
+}
+
+// Close calls the underlying closers in the appropriate order,
+// stopping at and returning the first error encountered.
+func (tr *TransformReader) Close() error {
+	// Call closers in reversed order
+	for i := len(tr.closers) - 1; i >= 0; i-- {
+		if err := tr.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		readCloser, err := zlib.NewReader(tr.Reader)
+		if err != nil {
+			return err
+		}
+		tr.Reader = readCloser
+		tr.closers = append(tr.closers, readCloser)
+	}
+	return nil
+}
+
+// TransformWriter is an io.WriteCloser that handles transforms writing.
+type TransformWriter struct {
+	io.Writer
+
+	closers []io.Closer
+}
+
+var _ io.WriteCloser = (*TransformWriter)(nil)
+
+// NewTransformWriter creates a new TransformWriter with base writer and transforms.
+func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
+	writer := &TransformWriter{
+		Writer:  baseWriter,
+		closers: make([]io.Closer, 0, len(transforms)),
+	}
+	for _, id := range transforms {
+		if err := writer.AddTransform(id); err != nil {
+			return nil, err
+		}
+	}
+	return writer, nil
+}
+
+// Close calls the underlying closers in the appropriate order,
+// stopping at and returning the first error encountered.
+func (tw *TransformWriter) Close() error {
+	// Call closers in reversed order
+	for i := len(tw.closers) - 1; i >= 0; i-- {
+		if err := tw.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		writeCloser := zlib.NewWriter(tw.Writer)
+		tw.Writer = writeCloser
+		tw.closers = append(tw.closers, writeCloser)
+	}
+	return nil
+}
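+
+// A minimal zlib round-trip sketch (not part of the upstream file):
+//
+//	var buf bytes.Buffer
+//	w, _ := NewTransformWriter(&buf, []THeaderTransformID{TransformZlib})
+//	w.Write([]byte("payload"))
+//	w.Close() // flushes the zlib stream
+//	r := &TransformReader{Reader: &buf}
+//	r.AddTransform(TransformZlib)
+//	b, _ := io.ReadAll(r) // b == []byte("payload")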
+
+// THeaderInfoType is the type id of the info headers.
+type THeaderInfoType int32
+
+// Supported THeaderInfoType values.
+const (
+	_            THeaderInfoType = iota // Skip 0
+	InfoKeyValue                        // 1
+	// The rest of the info types are not supported.
+)
+
+// THeaderTransport is a Transport mode that implements THeader.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// not a rich transport like TZlibTransport or TFramedTransport.
+type THeaderTransport struct {
+	SequenceID int32
+	Flags      uint32
+
+	transport TTransport
+
+	// THeaderMap for read and write
+	readHeaders  THeaderMap
+	writeHeaders THeaderMap
+
+	// Reading related variables.
+	reader *bufio.Reader
+	// When frame is detected, we read the frame fully into frameBuffer.
+	frameBuffer bytes.Buffer
+	// When it's non-nil, Read should read from frameReader instead of
+	// reader, and EOF error indicates end of frame instead of end of all
+	// transport.
+	frameReader io.ReadCloser
+
+	// Writing related variables
+	writeBuffer     bytes.Buffer
+	writeTransforms []THeaderTransformID
+
+	clientType clientType
+	protocolID THeaderProtocolID
+	cfg        *TConfiguration
+
+	// buffer is used in the following scenarios to avoid repetitive
+	// allocations, while 4 is big enough for all those scenarios:
+	//
+	// * header padding (max size 4)
+	// * write the frame size (size 4)
+	buffer [4]byte
+}
+
+var _ TTransport = (*THeaderTransport)(nil)
+
+// Deprecated: Use NewTHeaderTransportConf instead.
+func NewTHeaderTransport(trans TTransport) *THeaderTransport {
+	return NewTHeaderTransportConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderTransportConf creates THeaderTransport from the
+// underlying transport, with the given TConfiguration attached.
+//
+// If trans is already a *THeaderTransport, it will be returned as is,
+// but with TConfiguration overridden by the value passed in.
+//
+// The protocol ID in TConfiguration is only useful for client transports.
+// For servers,
+// the protocol ID will be overridden again to the one set by the client,
+// to ensure that servers always speak the same dialect as the client.
+func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
+	if ht, ok := trans.(*THeaderTransport); ok {
+		ht.SetTConfiguration(conf)
+		return ht
+	}
+	PropagateTConfiguration(trans, conf)
+	return &THeaderTransport{
+		transport:    trans,
+		reader:       bufio.NewReader(trans),
+		writeHeaders: make(THeaderMap),
+		protocolID:   conf.GetTHeaderProtocolID(),
+		cfg:          conf,
+	}
+}
+
+// Open calls the underlying transport's Open function.
+func (t *THeaderTransport) Open() error {
+	return t.transport.Open()
+}
+
+// IsOpen calls the underlying transport's IsOpen function.
+func (t *THeaderTransport) IsOpen() bool {
+	return t.transport.IsOpen()
+}
+
+// ReadFrame tries to read the frame header, guess the client type, and handle
+// unframed clients.
+func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
+	if !t.needReadFrame() {
+		// No need to read frame, skipping.
+		return nil
+	}
+
+	// Peek and handle the first 32 bits.
+	// They could either be the length field of a framed message,
+	// or the first bytes of an unframed message.
+	var buf []byte
+	var err error
+	// This is also usually the first read from a connection,
+	// so handle retries around socket timeouts.
+	_, deadlineSet := ctx.Deadline()
+	for {
+		buf, err = t.reader.Peek(size32)
+		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+			// This is an I/O timeout and we still have time,
+			// so continue trying.
+			continue
+		}
+		// For anything else, do not retry
+		break
+	}
+	if err != nil {
+		return err
+	}
+
+	frameSize := binary.BigEndian.Uint32(buf)
+	if frameSize&VERSION_MASK == VERSION_1 {
+		t.clientType = clientUnframedBinary
+		return nil
+	}
+	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+		t.clientType = clientUnframedCompact
+		return nil
+	}
+
+	// At this point it should be a framed message,
+	// sanity check on frameSize then discard the peeked part.
+	if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			errors.New("frame too large"),
+		)
+	}
+	t.reader.Discard(size32)
+
+	// Read the frame fully into frameBuffer.
+	_, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
+	if err != nil {
+		return err
+	}
+	t.frameReader = io.NopCloser(&t.frameBuffer)
+
+	// Peek and handle the next 32 bits.
+	buf = t.frameBuffer.Bytes()[:size32]
+	version := binary.BigEndian.Uint32(buf)
+	if version&THeaderHeaderMask == THeaderHeaderMagic {
+		t.clientType = clientHeaders
+		return t.parseHeaders(ctx, frameSize)
+	}
+	if version&VERSION_MASK == VERSION_1 {
+		t.clientType = clientFramedBinary
+		return nil
+	}
+	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+		t.clientType = clientFramedCompact
+		return nil
+	}
+	if err := t.endOfFrame(); err != nil {
+		return err
+	}
+	return NewTProtocolExceptionWithType(
+		NOT_IMPLEMENTED,
+		errors.New("unsupported client transport type"),
+	)
+}
+
+// endOfFrame does end of frame handling.
+//
+// It closes frameReader, and also resets frame related states.
+func (t *THeaderTransport) endOfFrame() error {
+	defer func() {
+		t.frameBuffer.Reset()
+		t.frameReader = nil
+	}()
+	return t.frameReader.Close()
+}
+
+func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
+	if t.clientType != clientHeaders {
+		return nil
+	}
+
+	var err error
+	var meta headerMeta
+	if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
+		return err
+	}
+	frameSize -= headerMetaSize
+	t.Flags = meta.MagicFlags & THeaderFlagsMask
+	t.SequenceID = meta.SequenceID
+	headerLength := int64(meta.HeaderLength) * 4
+	if int64(frameSize) < headerLength {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			errors.New("header size is larger than the whole frame"),
+		)
+	}
+	headerBuf := NewTMemoryBuffer()
+	_, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
+	if err != nil {
+		return err
+	}
+	hp := NewTCompactProtocol(headerBuf)
+	hp.SetTConfiguration(t.cfg)
+
+	// At this point the header is already read into headerBuf,
+	// and t.frameBuffer starts from the actual payload.
+	protoID, err := hp.readVarint32()
+	if err != nil {
+		return err
+	}
+	t.protocolID = THeaderProtocolID(protoID)
+
+	var transformCount int32
+	transformCount, err = hp.readVarint32()
+	if err != nil {
+		return err
+	}
+	if transformCount > 0 {
+		reader := NewTransformReaderWithCapacity(
+			&t.frameBuffer,
+			int(transformCount),
+		)
+		t.frameReader = reader
+		transformIDs := make([]THeaderTransformID, transformCount)
+		for i := 0; i < int(transformCount); i++ {
+			id, err := hp.readVarint32()
+			if err != nil {
+				return err
+			}
+			transformIDs[i] = THeaderTransformID(id)
+		}
+		// The transform IDs were written to the wire in the order of
+		// writing, so on the reading side we need to reverse the order.
+		for i := transformCount - 1; i >= 0; i-- {
+			id := transformIDs[i]
+			if err := reader.AddTransform(id); err != nil {
+				return err
+			}
+		}
+	}
+
+	// The info part does not use the transforms yet, so it's
+	// important to continue using headerBuf.
+	headers := make(THeaderMap)
+	for {
+		infoType, err := hp.readVarint32()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if THeaderInfoType(infoType) == InfoKeyValue {
+			count, err := hp.readVarint32()
+			if err != nil {
+				return err
+			}
+			for i := 0; i < int(count); i++ {
+				key, err := hp.ReadString(ctx)
+				if err != nil {
+					return err
+				}
+				value, err := hp.ReadString(ctx)
+				if err != nil {
+					return err
+				}
+				headers[key] = value
+			}
+		} else {
+			// Skip reading info section on the first
+			// unsupported info type.
+			break
+		}
+	}
+	t.readHeaders = headers
+
+	return nil
+}
+
+func (t *THeaderTransport) needReadFrame() bool {
+	if t.clientType == clientUnknown {
+		// This is a new connection that has never been read from before.
+		return true
+	}
+	if t.isFramed() && t.frameReader == nil {
+		// We just finished the last frame.
+		return true
+	}
+	return false
+}
+
+func (t *THeaderTransport) Read(p []byte) (read int, err error) {
+	// Using context.Background here instead of a passed-in context is safe.
+	// First, there's no way to pass a context into this function.
+	// Second, in the vast majority of calls the frame has already been read
+	// into frameReader; the ReadFrame call here mostly guards against bugs
+	// where Read is called without a prior ReadFrame.
+	err = t.ReadFrame(context.Background())
+	if err != nil {
+		return
+	}
+	if t.frameReader != nil {
+		read, err = t.frameReader.Read(p)
+		if err == nil && t.frameBuffer.Len() <= 0 {
+			// the last Read finished the frame, do endOfFrame
+			// handling here.
+			err = t.endOfFrame()
+		} else if err == io.EOF {
+			err = t.endOfFrame()
+			if err != nil {
+				return
+			}
+			if read == 0 {
+				// Try to read the next frame when we hit EOF
+				// (end of frame) immediately.
+				// When we get here, it means the last read
+				// finished the previous frame, but didn't
+				// do endOfFrame handling yet.
+				// We have to read the next frame here,
+				// as otherwise we would return 0 and nil,
+				// which is a case not handled well by most
+				// protocol implementations.
+				return t.Read(p)
+			}
+		}
+		return
+	}
+	return t.reader.Read(p)
+}
+
+// Write writes data to the write buffer.
+//
+// You need to call Flush to actually write them to the transport.
+func (t *THeaderTransport) Write(p []byte) (int, error) {
+	return t.writeBuffer.Write(p)
+}
+
+// Flush writes the appropriate header and the write buffer to the underlying transport.
+func (t *THeaderTransport) Flush(ctx context.Context) error {
+	if t.writeBuffer.Len() == 0 {
+		return nil
+	}
+
+	defer t.writeBuffer.Reset()
+
+	switch t.clientType {
+	default:
+		fallthrough
+	case clientUnknown:
+		t.clientType = clientHeaders
+		fallthrough
+	case clientHeaders:
+		headers := NewTMemoryBuffer()
+		hp := NewTCompactProtocol(headers)
+		hp.SetTConfiguration(t.cfg)
+		if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		for _, transform := range t.writeTransforms {
+			if _, err := hp.writeVarint32(int32(transform)); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+		}
+		if len(t.writeHeaders) > 0 {
+			if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+			if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+			for key, value := range t.writeHeaders {
+				if err := hp.WriteString(ctx, key); err != nil {
+					return NewTTransportExceptionFromError(err)
+				}
+				if err := hp.WriteString(ctx, value); err != nil {
+					return NewTTransportExceptionFromError(err)
+				}
+			}
+		}
+		padding := 4 - headers.Len()%4
+		if padding < 4 {
+			buf := t.buffer[:padding]
+			for i := range buf {
+				buf[i] = 0
+			}
+			if _, err := headers.Write(buf); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+		}
+
+		var payload bytes.Buffer
+		meta := headerMeta{
+			MagicFlags:   THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
+			SequenceID:   t.SequenceID,
+			HeaderLength: uint16(headers.Len() / 4),
+		}
+		if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := io.Copy(&payload, headers); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+		writer, err := NewTransformWriter(&payload, t.writeTransforms)
+		if err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if err := writer.Close(); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+		// First write frame length
+		buf := t.buffer[:size32]
+		binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
+		if _, err := t.transport.Write(buf); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		// Then write the payload
+		if _, err := io.Copy(t.transport, &payload); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+	case clientFramedBinary, clientFramedCompact:
+		buf := t.buffer[:size32]
+		binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
+		if _, err := t.transport.Write(buf); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		fallthrough
+	case clientUnframedBinary, clientUnframedCompact:
+		if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+
+	select {
+	default:
+	case <-ctx.Done():
+		return NewTTransportExceptionFromError(ctx.Err())
+	}
+
+	return t.transport.Flush(ctx)
+}
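+
+// For the clientHeaders case, the frame assembled by Flush above is laid
+// out on the wire as:
+//
+//	[4-byte BE frame size]
+//	[headerMeta: magic+flags (4 bytes), sequence id (4), header length/4 (2)]
+//	[compact-encoded header block, zero-padded to a multiple of 4 bytes]
+//	[message payload, with any write transforms applied]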
+
+// Close closes the transport, along with its underlying transport.
+func (t *THeaderTransport) Close() error {
+	if err := t.Flush(context.Background()); err != nil {
+		return err
+	}
+	return t.transport.Close()
+}
+
+// RemainingBytes calls the underlying transport's RemainingBytes.
+//
+// Even in framed cases, because of all the possible compression transforms
+// involved, the remaining frame size is likely to be different from the actual
+// remaining readable bytes, so we don't bother to keep tracking the remaining
+// frame size by ourselves and just use the underlying transport's
+// RemainingBytes directly.
+func (t *THeaderTransport) RemainingBytes() uint64 {
+	return t.transport.RemainingBytes()
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (t *THeaderTransport) GetReadHeaders() THeaderMap {
+	return t.readHeaders
+}
+
+// SetWriteHeader sets a header for write.
+func (t *THeaderTransport) SetWriteHeader(key, value string) {
+	t.writeHeaders[key] = value
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (t *THeaderTransport) ClearWriteHeaders() {
+	t.writeHeaders = make(THeaderMap)
+}
+
+// AddTransform adds a transform for writing.
+func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
+	if !supportedTransformIDs[transform] {
+		return NewTProtocolExceptionWithType(
+			NOT_IMPLEMENTED,
+			fmt.Errorf("THeaderTransformID %d not supported", transform),
+		)
+	}
+	t.writeTransforms = append(t.writeTransforms, transform)
+	return nil
+}
+
+// Protocol returns the wrapped protocol id used in this THeaderTransport.
+func (t *THeaderTransport) Protocol() THeaderProtocolID {
+	switch t.clientType {
+	default:
+		return t.protocolID
+	case clientFramedBinary, clientUnframedBinary:
+		return THeaderProtocolBinary
+	case clientFramedCompact, clientUnframedCompact:
+		return THeaderProtocolCompact
+	}
+}
+
+func (t *THeaderTransport) isFramed() bool {
+	switch t.clientType {
+	default:
+		return false
+	case clientHeaders, clientFramedBinary, clientFramedCompact:
+		return true
+	}
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(t.transport, cfg)
+	t.cfg = cfg
+}
+
+// THeaderTransportFactory is a TTransportFactory implementation to create
+// THeaderTransport.
+//
+// It also implements TConfigurationSetter.
+type THeaderTransportFactory struct {
+	// The underlying factory; may be nil.
+	Factory TTransportFactory
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderTransportFactoryConf instead.
+func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
+	return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
+// the given *TConfiguration.
+func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
+	return &THeaderTransportFactory{
+		Factory: factory,
+
+		cfg: conf,
+	}
+}
+
+// GetTransport implements TTransportFactory.
+func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if f.Factory != nil {
+		t, err := f.Factory.GetTransport(trans)
+		if err != nil {
+			return nil, err
+		}
+		return NewTHeaderTransportConf(t, f.cfg), nil
+	}
+	return NewTHeaderTransportConf(trans, f.cfg), nil
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.Factory, f.cfg)
+	f.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*THeaderTransportFactory)(nil)
+	_ TConfigurationSetter = (*THeaderTransport)(nil)
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
new file mode 100644
index 000000000..9a2cc98cc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// Default to using the shared http client. Library users are
+// free to change this global client or specify one through
+// THttpClientOptions.
+var DefaultHttpClient *http.Client = http.DefaultClient
+
+type THttpClient struct {
+	client             *http.Client
+	response           *http.Response
+	url                *url.URL
+	requestBuffer      *bytes.Buffer
+	header             http.Header
+	nsecConnectTimeout int64
+	nsecReadTimeout    int64
+}
+
+type THttpClientTransportFactory struct {
+	options THttpClientOptions
+	url     string
+}
+
+func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*THttpClient)
+		if ok && t.url != nil {
+			return NewTHttpClientWithOptions(t.url.String(), p.options)
+		}
+	}
+	return NewTHttpClientWithOptions(p.url, p.options)
+}
+
+type THttpClientOptions struct {
+	// If nil, DefaultHttpClient is used
+	Client *http.Client
+}
+
+func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
+}
+
+func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
+	return &THttpClientTransportFactory{url: url, options: options}
+}
+
+func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
+	parsedURL, err := url.Parse(urlstr)
+	if err != nil {
+		return nil, err
+	}
+	buf := make([]byte, 0, 1024)
+	client := options.Client
+	if client == nil {
+		client = DefaultHttpClient
+	}
+	httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
+	return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
+}
+
+func NewTHttpClient(urlstr string) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
+}
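+
+// A minimal construction sketch (not part of the upstream file; the URL and
+// timeout are hypothetical):
+//
+//	httpClient := &http.Client{Timeout: 10 * time.Second}
+//	trans, err := NewTHttpClientWithOptions(
+//		"http://localhost:9090/thrift",
+//		THttpClientOptions{Client: httpClient},
+//	)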
+
+// SetHeader sets the HTTP header for this specific Thrift transport.
+// It is important that you first assert the TTransport as a *THttpClient
+// like so:
+//
+// httpTrans := trans.(*THttpClient)
+// httpTrans.SetHeader("User-Agent", "Thrift Client 1.0")
+func (p *THttpClient) SetHeader(key string, value string) {
+	p.header.Add(key, value)
+}
+
+// GetHeader returns the HTTP header value for the supplied key on this
+// specific Thrift transport.
+// It is important that you first assert the TTransport as a *THttpClient
+// like so:
+//
+// httpTrans := trans.(*THttpClient)
+// hdrValue := httpTrans.GetHeader("User-Agent")
+func (p *THttpClient) GetHeader(key string) string {
+	return p.header.Get(key)
+}
+
+// DelHeader deletes the HTTP header for the given key on this specific
+// Thrift transport.
+// It is important that you first assert the TTransport as a *THttpClient
+// like so:
+//
+// httpTrans := trans.(*THttpClient)
+// httpTrans.DelHeader("User-Agent")
+func (p *THttpClient) DelHeader(key string) {
+	p.header.Del(key)
+}
+
+func (p *THttpClient) Open() error {
+	// do nothing
+	return nil
+}
+
+func (p *THttpClient) IsOpen() bool {
+	return p.response != nil || p.requestBuffer != nil
+}
+
+func (p *THttpClient) closeResponse() error {
+	var err error
+	if p.response != nil && p.response.Body != nil {
+		// The docs specify that if keepalive is enabled and the response body is not
+		// read to completion the connection will never be returned to the pool and
+		// reused. Errors are being ignored here because if the connection is invalid
+		// and this fails for some reason, the Close() method will do any remaining
+		// cleanup.
+		io.Copy(io.Discard, p.response.Body)
+
+		err = p.response.Body.Close()
+	}
+
+	p.response = nil
+	return err
+}
+
+func (p *THttpClient) Close() error {
+	if p.requestBuffer != nil {
+		p.requestBuffer.Reset()
+		p.requestBuffer = nil
+	}
+	return p.closeResponse()
+}
+
+func (p *THttpClient) Read(buf []byte) (int, error) {
+	if p.response == nil {
+		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
+	}
+	n, err := p.response.Body.Read(buf)
+	if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
+		return n, nil
+	}
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *THttpClient) ReadByte() (c byte, err error) {
+	if p.response == nil {
+		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
+	}
+	return readByte(p.response.Body)
+}
+
+func (p *THttpClient) Write(buf []byte) (int, error) {
+	if p.requestBuffer == nil {
+		return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
+	}
+	return p.requestBuffer.Write(buf)
+}
+
+func (p *THttpClient) WriteByte(c byte) error {
+	if p.requestBuffer == nil {
+		return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
+	}
+	return p.requestBuffer.WriteByte(c)
+}
+
+func (p *THttpClient) WriteString(s string) (n int, err error) {
+	if p.requestBuffer == nil {
+		return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
+	}
+	return p.requestBuffer.WriteString(s)
+}
+
+func (p *THttpClient) Flush(ctx context.Context) error {
+	// Close any previous response body to avoid leaking connections.
+	p.closeResponse()
+
+	// Hand ownership of the current request buffer to the http request,
+	// and create a new buffer for the next request.
+	buf := p.requestBuffer
+	p.requestBuffer = new(bytes.Buffer)
+	req, err := http.NewRequest("POST", p.url.String(), buf)
+	if err != nil {
+		return NewTTransportExceptionFromError(err)
+	}
+	req.Header = p.header
+	if ctx != nil {
+		req = req.WithContext(ctx)
+	}
+	response, err := p.client.Do(req)
+	if err != nil {
+		return NewTTransportExceptionFromError(err)
+	}
+	if response.StatusCode != http.StatusOK {
+		// Close the response to avoid leaking file descriptors. closeResponse does
+		// more than just call Close(), so temporarily assign it and reuse the logic.
+		p.response = response
+		p.closeResponse()
+
+		// TODO(pomack) log bad response
+		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
+	}
+	p.response = response
+	return nil
+}
+
+func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
+	length := p.response.ContentLength
+	if length >= 0 {
+		return uint64(length)
+	}
+
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
+
+// Deprecated: Use NewTHttpClientTransportFactory instead.
+func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
+}
+
+// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
+func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
+	return NewTHttpClientTransportFactoryWithOptions(url, options)
+}
+
+// Deprecated: Use NewTHttpClientWithOptions instead.
+func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, options)
+}
+
+// Deprecated: Use NewTHttpClient instead.
+func NewTHttpPostClient(urlstr string) (TTransport, error) {
+	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
new file mode 100644
index 000000000..bc6922762
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"compress/gzip"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+// NewThriftHandlerFunc creates a ready-to-use Apache Thrift handler function.
+func NewThriftHandlerFunc(processor TProcessor,
+	inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
+
+	return gz(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Add("Content-Type", "application/x-thrift")
+
+		transport := NewStreamTransport(r.Body, w)
+		processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
+	})
+}
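+
+// A minimal wiring sketch (not part of the upstream file; "processor" is any
+// TProcessor and "factory" any TProtocolFactory):
+//
+//	handler := NewThriftHandlerFunc(processor, factory, factory)
+//	http.HandleFunc("/thrift", handler)
+//	http.ListenAndServe(":9090", nil)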
+
+// gz transparently compresses the HTTP response if the client supports it.
+func gz(handler http.HandlerFunc) http.HandlerFunc {
+	sp := &sync.Pool{
+		New: func() interface{} {
+			return gzip.NewWriter(nil)
+		},
+	}
+
+	return func(w http.ResponseWriter, r *http.Request) {
+		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
+			handler(w, r)
+			return
+		}
+		w.Header().Set("Content-Encoding", "gzip")
+		gz := sp.Get().(*gzip.Writer)
+		gz.Reset(w)
+		defer func() {
+			_ = gz.Close()
+			sp.Put(gz)
+		}()
+		gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
+		handler(gzw, r)
+	}
+}
+
+type gzipResponseWriter struct {
+	io.Writer
+	http.ResponseWriter
+}
+
+func (w gzipResponseWriter) Write(b []byte) (int, error) {
+	return w.Writer.Write(b)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
new file mode 100644
index 000000000..1c477990f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"context"
+	"io"
+)
+
+// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
+type StreamTransport struct {
+	io.Reader
+	io.Writer
+	isReadWriter bool
+	closed       bool
+}
+
+type StreamTransportFactory struct {
+	Reader       io.Reader
+	Writer       io.Writer
+	isReadWriter bool
+}
+
+func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*StreamTransport)
+		if ok {
+			if t.isReadWriter {
+				return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
+			}
+			if t.Reader != nil && t.Writer != nil {
+				return NewStreamTransport(t.Reader, t.Writer), nil
+			}
+			if t.Reader != nil && t.Writer == nil {
+				return NewStreamTransportR(t.Reader), nil
+			}
+			if t.Reader == nil && t.Writer != nil {
+				return NewStreamTransportW(t.Writer), nil
+			}
+			return &StreamTransport{}, nil
+		}
+	}
+	if p.isReadWriter {
+		return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
+	}
+	if p.Reader != nil && p.Writer != nil {
+		return NewStreamTransport(p.Reader, p.Writer), nil
+	}
+	if p.Reader != nil && p.Writer == nil {
+		return NewStreamTransportR(p.Reader), nil
+	}
+	if p.Reader == nil && p.Writer != nil {
+		return NewStreamTransportW(p.Writer), nil
+	}
+	return &StreamTransport{}, nil
+}
+
+func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
+	return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
+}
+
+func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
+	return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
+}
+
+func NewStreamTransportR(r io.Reader) *StreamTransport {
+	return &StreamTransport{Reader: bufio.NewReader(r)}
+}
+
+func NewStreamTransportW(w io.Writer) *StreamTransport {
+	return &StreamTransport{Writer: bufio.NewWriter(w)}
+}
+
+func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
+	bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
+	return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
+}
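+
+// Usage sketch (illustrative): any io.ReadWriter works here, e.g. a
+// net.Conn obtained elsewhere:
+//
+//	var conn net.Conn // assumed to be an established connection
+//	trans := NewStreamTransportRW(conn)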
+
+func (p *StreamTransport) IsOpen() bool {
+	return !p.closed
+}
+
+// implicitly opened on creation, can't be reopened once closed
+func (p *StreamTransport) Open() error {
+	if !p.closed {
+		return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
+	} else {
+		return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
+	}
+}
+
+// Closes both the input and output streams.
+func (p *StreamTransport) Close() error {
+	if p.closed {
+		return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
+	}
+	p.closed = true
+	closedReader := false
+	if p.Reader != nil {
+		c, ok := p.Reader.(io.Closer)
+		if ok {
+			e := c.Close()
+			closedReader = true
+			if e != nil {
+				return e
+			}
+		}
+		p.Reader = nil
+	}
+	if p.Writer != nil && (!closedReader || !p.isReadWriter) {
+		c, ok := p.Writer.(io.Closer)
+		if ok {
+			e := c.Close()
+			if e != nil {
+				return e
+			}
+		}
+		p.Writer = nil
+	}
+	return nil
+}
+
+// Flushes the underlying output stream if not null.
+func (p *StreamTransport) Flush(ctx context.Context) error {
+	if p.Writer == nil {
+		return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
+	}
+	f, ok := p.Writer.(Flusher)
+	if ok {
+		err := f.Flush()
+		if err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+	return nil
+}
+
+func (p *StreamTransport) Read(c []byte) (n int, err error) {
+	n, err = p.Reader.Read(c)
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) ReadByte() (c byte, err error) {
+	f, ok := p.Reader.(io.ByteReader)
+	if ok {
+		c, err = f.ReadByte()
+	} else {
+		c, err = readByte(p.Reader)
+	}
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) Write(c []byte) (n int, err error) {
+	n, err = p.Writer.Write(c)
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) WriteByte(c byte) (err error) {
+	f, ok := p.Writer.(io.ByteWriter)
+	if ok {
+		err = f.WriteByte(c)
+	} else {
+		err = writeByte(p.Writer, c)
+	}
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) WriteString(s string) (n int, err error) {
+	f, ok := p.Writer.(stringWriter)
+	if ok {
+		n, err = f.WriteString(s)
+	} else {
+		n, err = p.Writer.Write([]byte(s))
+	}
+	if err != nil {
+		err = NewTTransportExceptionFromError(err)
+	}
+	return
+}
+
+func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.Reader, conf)
+	PropagateTConfiguration(p.Writer, conf)
+}
+
+var _ TConfigurationSetter = (*StreamTransport)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
new file mode 100644
index 000000000..8e59d16cf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
@@ -0,0 +1,591 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+)
+
+const (
+	THRIFT_JSON_PROTOCOL_VERSION = 1
+)
+
+// for references to _ParseContext see tsimplejson_protocol.go
+
+// TJSONProtocol is the JSON protocol implementation for Thrift.
+// It builds on the Simple JSON protocol.
+type TJSONProtocol struct {
+	*TSimpleJSONProtocol
+}
+
+// Constructor
+func NewTJSONProtocol(t TTransport) *TJSONProtocol {
+	v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
+	v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
+	v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
+	return v
+}
+
+// Factory
+type TJSONProtocolFactory struct{}
+
+func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTJSONProtocol(trans)
+}
+
+func NewTJSONProtocolFactory() *TJSONProtocolFactory {
+	return &TJSONProtocolFactory{}
+}
+
+func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+	p.resetContextStack() // THRIFT-3735
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil {
+		return e
+	}
+	if e := p.WriteString(ctx, name); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(typeId)); e != nil {
+		return e
+	}
+	if e := p.WriteI32(ctx, seqId); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	if e := p.WriteI16(ctx, id); e != nil {
+		return e
+	}
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(typeId)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(ctx, s); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
+
+func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(keyType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(ctx, s); e != nil {
+		return e
+	}
+	s, e1 = p.TypeIdToString(valueType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.WriteString(ctx, s); e != nil {
+		return e
+	}
+	if e := p.WriteI64(ctx, int64(size)); e != nil {
+		return e
+	}
+	return p.OutputObjectBegin()
+}
+
+func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error {
+	if e := p.OutputObjectEnd(); e != nil {
+		return e
+	}
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error {
+	if b {
+		return p.WriteI32(ctx, 1)
+	}
+	return p.WriteI32(ctx, 0)
+}
+
+func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error {
+	return p.WriteI32(ctx, int32(b))
+}
+
+func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error {
+	return p.WriteI32(ctx, int32(v))
+}
+
+func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
+	// The JSON library only takes in a string, not an arbitrary byte
+	// array. To transmit bytes efficiently inside a valid JSON string,
+	// we base64-encode them, which avoids excessive escaping/quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+// Reading methods.
+func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	p.resetContextStack() // THRIFT-3735
+	if isNull, err := p.ParseListBegin(); isNull || err != nil {
+		return name, typeId, seqId, err
+	}
+	version, err := p.ReadI32(ctx)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if version != THRIFT_JSON_PROTOCOL_VERSION {
+		e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
+		return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+	}
+	if name, err = p.ReadString(ctx); err != nil {
+		return name, typeId, seqId, err
+	}
+	bTypeId, err := p.ReadByte(ctx)
+	typeId = TMessageType(bTypeId)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if seqId, err = p.ReadI32(ctx); err != nil {
+		return name, typeId, seqId, err
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error {
+	err := p.ParseListEnd()
+	return err
+}
+
+func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	_, err = p.ParseObjectStart()
+	return "", err
+}
+
+func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
+	b, _ := p.reader.Peek(1)
+	if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
+		return "", STOP, -1, nil
+	}
+	fieldId, err := p.ReadI16(ctx)
+	if err != nil {
+		return "", STOP, fieldId, err
+	}
+	if _, err = p.ParseObjectStart(); err != nil {
+		return "", STOP, fieldId, err
+	}
+	sType, err := p.ReadString(ctx)
+	if err != nil {
+		return "", STOP, fieldId, err
+	}
+	fType, err := p.StringToTypeId(sType)
+	return "", fType, fieldId, err
+}
+
+func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, VOID, 0, e
+	}
+
+	// read keyType
+	sKeyType, e := p.ReadString(ctx)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+	keyType, e = p.StringToTypeId(sKeyType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read valueType
+	sValueType, e := p.ReadString(ctx)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+	valueType, e = p.StringToTypeId(sValueType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read size
+	iSize, e := p.ReadI64(ctx)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+	size = int(iSize)
+
+	_, e = p.ParseObjectStart()
+	return keyType, valueType, size, e
+}
+
+func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error {
+	e := p.ParseObjectEnd()
+	if e != nil {
+		return e
+	}
+	return p.ParseListEnd()
+}
+
+func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
+	value, err := p.ReadI32(ctx)
+	return (value != 0), err
+}
+
+func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.ReadI64(ctx)
+	return int8(v), err
+}
+
+func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
+	v, err := p.ReadI64(ctx)
+	return int16(v), err
+}
+
+func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
+	v, err := p.ReadI64(ctx)
+	return int32(v), err
+}
+
+func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
+	v, _, err := p.ParseI64()
+	return v, err
+}
+
+func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
+	v, _, err := p.ParseF64()
+	return v, err
+}
+
+func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) {
+	var v string
+	if err := p.ParsePreValue(); err != nil {
+		return v, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseStringBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data starting with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data starting with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, p.ParsePostValue()
+}
+
+func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+	var v []byte
+	if err := p.ParsePreValue(); err != nil {
+		return nil, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseBase64EncodedBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data starting with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data starting with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+
+	return v, p.ParsePostValue()
+}
+
+func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
+	err = p.writer.Flush()
+	if err == nil {
+		err = p.trans.Flush(ctx)
+	}
+	return NewTProtocolException(err)
+}
+
+func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TJSONProtocol) Transport() TTransport {
+	return p.trans
+}
+
+func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(elemType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.OutputString(s); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	// We don't really use the ctx in ReadString implementation,
+	// so this is safe for now.
+	// We might want to add context to ParseElemListBegin if we start to use
+	// ctx in ReadString implementation in the future.
+	sElemType, err := p.ReadString(context.Background())
+	if err != nil {
+		return VOID, size, err
+	}
+	elemType, err = p.StringToTypeId(sElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, _, err2 := p.ParseI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	// We don't really use the ctx in ReadString implementation,
+	// so this is safe for now.
+	// We might want to add context to readElemListBegin if we start to use
+	// ctx in ReadString implementation in the future.
+	sElemType, err := p.ReadString(context.Background())
+	if err != nil {
+		return VOID, size, err
+	}
+	elemType, err = p.StringToTypeId(sElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, _, err2 := p.ParseI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	s, e1 := p.TypeIdToString(elemType)
+	if e1 != nil {
+		return e1
+	}
+	if e := p.OutputString(s); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
+	switch byte(fieldType) {
+	case BOOL:
+		return "tf", nil
+	case BYTE:
+		return "i8", nil
+	case I16:
+		return "i16", nil
+	case I32:
+		return "i32", nil
+	case I64:
+		return "i64", nil
+	case DOUBLE:
+		return "dbl", nil
+	case STRING:
+		return "str", nil
+	case STRUCT:
+		return "rec", nil
+	case MAP:
+		return "map", nil
+	case SET:
+		return "set", nil
+	case LIST:
+		return "lst", nil
+	}
+
+	e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
+	return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
+	switch fieldType {
+	case "tf":
+		return TType(BOOL), nil
+	case "i8":
+		return TType(BYTE), nil
+	case "i16":
+		return TType(I16), nil
+	case "i32":
+		return TType(I32), nil
+	case "i64":
+		return TType(I64), nil
+	case "dbl":
+		return TType(DOUBLE), nil
+	case "str":
+		return TType(STRING), nil
+	case "rec":
+		return TType(STRUCT), nil
+	case "map":
+		return TType(MAP), nil
+	case "set":
+		return TType(SET), nil
+	case "lst":
+		return TType(LIST), nil
+	}
+
+	e := fmt.Errorf("Unknown type identifier: %s", fieldType)
+	return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+var _ TConfigurationSetter = (*TJSONProtocol)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
new file mode 100644
index 000000000..c42aac998
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"log"
+	"os"
+	"testing"
+)
+
+// Logger is a simple wrapper of a logging function.
+//
+// In reality the users might actually use different logging libraries, and they
+// are not always compatible with each other.
+//
+// Logger is meant to be a simple common ground that makes it easy to wrap
+// whatever logging library they use.
+//
+// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
+// discussion behind it.
+type Logger func(msg string)
+
+// NopLogger is a Logger implementation that does nothing.
+func NopLogger(msg string) {}
+
+// StdLogger wraps stdlib log package into a Logger.
+//
+// If the logger passed in is nil, it falls back to stderr with default flags.
+func StdLogger(logger *log.Logger) Logger {
+	if logger == nil {
+		logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+	return func(msg string) {
+		logger.Print(msg)
+	}
+}
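+
+// Adapting another logging library is a one-line wrapper; a sketch in
+// which "zapLogger" is hypothetical:
+//
+//	var l Logger = func(msg string) { zapLogger.Warn(msg) }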
+
+// TestLogger is a Logger implementation that can be used in test code.
+//
+// It fails the test when called.
+func TestLogger(tb testing.TB) Logger {
+	return func(msg string) {
+		tb.Errorf("logger called with msg: %q", msg)
+	}
+}
+
+func fallbackLogger(logger Logger) Logger {
+	if logger == nil {
+		return StdLogger(nil)
+	}
+	return logger
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
new file mode 100644
index 000000000..5936d2730
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"context"
+)
+
+// TMemoryBuffer is a memory buffer-based implementation of the TTransport interface.
+type TMemoryBuffer struct {
+	*bytes.Buffer
+	size int
+}
+
+type TMemoryBufferTransportFactory struct {
+	size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*TMemoryBuffer)
+		if ok && t.size > 0 {
+			return NewTMemoryBufferLen(t.size), nil
+		}
+	}
+	return NewTMemoryBufferLen(p.size), nil
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+	return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+	return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+	buf := make([]byte, 0, size)
+	return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
+
+func (p *TMemoryBuffer) IsOpen() bool {
+	return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+	return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+	p.Buffer.Reset()
+	return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush(ctx context.Context) error {
+	return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+	return uint64(p.Buffer.Len())
+}
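+
+// Usage sketch (illustrative): a TMemoryBuffer doubles as an in-memory
+// transport, e.g. for serializing a value without any I/O:
+//
+//	trans := NewTMemoryBuffer()
+//	proto := NewTJSONProtocol(trans)
+//	_ = proto.WriteString(context.Background(), "hello")
+//	_ = proto.Flush(context.Background())
+//	// trans.String() now holds the encoded payload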
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
new file mode 100644
index 000000000..25ab2e98a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+	INVALID_TMESSAGE_TYPE TMessageType = 0
+	CALL                  TMessageType = 1
+	REPLY                 TMessageType = 2
+	EXCEPTION             TMessageType = 3
+	ONEWAY                TMessageType = 4
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
new file mode 100644
index 000000000..8a788df02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the
+// TProcessorFunctions for that TProcessor.
+//
+// Each middleware is passed the name of the function as set in the
+// processor map of the TProcessor.
+type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction
+
+// WrapProcessor takes an existing TProcessor and wraps each of its inner
+// TProcessorFunctions with the middlewares passed in and returns it.
+//
+// Middlewares will be called in the order that they are defined:
+//
+//		1. Middlewares[0]
+//		2. Middlewares[1]
+//		...
+//		N. Middlewares[n]
+func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor {
+	for name, processorFunc := range processor.ProcessorMap() {
+		wrapped := processorFunc
+		// Add middlewares in reverse so the first in the list is the outermost.
+		for i := len(middlewares) - 1; i >= 0; i-- {
+			wrapped = middlewares[i](name, wrapped)
+		}
+		processor.AddToProcessorMap(name, wrapped)
+	}
+	return processor
+}
+
+// WrappedTProcessorFunction is a convenience struct that implements the
+// TProcessorFunction interface that can be used when implementing custom
+// Middleware.
+type WrappedTProcessorFunction struct {
+	// Wrapped is called by WrappedTProcessorFunction.Process and should be a
+	// "wrapped" call to a base TProcessorFunc.Process call.
+	Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
+
+// Process implements the TProcessorFunction interface using p.Wrapped.
+func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) {
+	return p.Wrapped(ctx, seqID, in, out)
+}
+
+// verify that WrappedTProcessorFunction implements TProcessorFunction
+var (
+	_ TProcessorFunction = WrappedTProcessorFunction{}
+	_ TProcessorFunction = (*WrappedTProcessorFunction)(nil)
+)
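+
+// A minimal ProcessorMiddleware sketch (assumes the stdlib log package;
+// illustrative only, not part of the upstream API):
+//
+//	func logging(name string, next TProcessorFunction) TProcessorFunction {
+//		return WrappedTProcessorFunction{
+//			Wrapped: func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) {
+//				log.Printf("handling %s", name)
+//				return next.Process(ctx, seqId, in, out)
+//			},
+//		}
+//	}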
+
+// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls
+// with custom middleware.
+type ClientMiddleware func(TClient) TClient
+
+// WrappedTClient is a convenience struct that implements the TClient interface
+// using inner Wrapped function.
+//
+// This is provided to aid in developing ClientMiddleware.
+type WrappedTClient struct {
+	Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
+}
+
+// Call implements the TClient interface by calling and returning c.Wrapped.
+func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+	return c.Wrapped(ctx, method, args, result)
+}
+
+// verify that WrappedTClient implements TClient
+var (
+	_ TClient = WrappedTClient{}
+	_ TClient = (*WrappedTClient)(nil)
+)
+
+// WrapClient wraps the given TClient in the given middlewares.
+//
+// Middlewares will be called in the order that they are defined:
+//
+//		1. Middlewares[0]
+//		2. Middlewares[1]
+//		...
+//		N. Middlewares[n]
+func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient {
+	// Add middlewares in reverse so the first in the list is the outermost.
+	for i := len(middlewares) - 1; i >= 0; i-- {
+		client = middlewares[i](client)
+	}
+	return client
+}
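+
+// A minimal ClientMiddleware sketch (assumes the stdlib log package;
+// illustrative only):
+//
+//	func logCalls(next TClient) TClient {
+//		return WrappedTClient{
+//			Wrapped: func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+//				log.Printf("calling %s", method)
+//				return next.Call(ctx, method, args, result)
+//			},
+//		}
+//	}
+//
+//	client = WrapClient(client, logCalls)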
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
new file mode 100644
index 000000000..cacbf6bef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+/*
+TMultiplexedProtocol is a protocol-independent concrete decorator
+that allows a Thrift client to communicate with a multiplexing Thrift server,
+by prepending the service name to the function name during function calls.
+
+NOTE: THIS IS NOT USED BY SERVERS.  On the server, use TMultiplexedProcessor to handle requests
+from a multiplexing client.
+
+This example uses a single socket transport to invoke two services:
+
+socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
+transport := thrift.NewTFramedTransport(socket)
+protocol := thrift.NewTBinaryProtocolTransport(transport)
+
+mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
+service := Calculator.NewCalculatorClient(mp)
+
+mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
+service2 := WeatherReport.NewWeatherReportClient(mp2)
+
+err := transport.Open()
+if err != nil {
+	t.Fatal("Unable to open client socket", err)
+}
+
+fmt.Println(service.Add(2,2))
+fmt.Println(service2.GetTemperature())
+*/
+
+type TMultiplexedProtocol struct {
+	TProtocol
+	serviceName string
+}
+
+const MULTIPLEXED_SEPARATOR = ":"
+
+func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
+	return &TMultiplexedProtocol{
+		TProtocol:   protocol,
+		serviceName: serviceName,
+	}
+}
+
+func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
+	if typeId == CALL || typeId == ONEWAY {
+		return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
+	} else {
+		return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid)
+	}
+}
+
+/*
+TMultiplexedProcessor is a TProcessor allowing
+a single TServer to provide multiple services.
+
+To do so, you instantiate the processor and then register additional
+processors with it, as shown in the following example:
+
+var processor = thrift.NewTMultiplexedProcessor()
+
+processor.RegisterProcessor(
+  "Calculator",
+  Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
+)
+
+processor.RegisterProcessor(
+  "WeatherReport",
+  WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
+)
+
+serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
+if err != nil {
+  t.Fatal("Unable to create server socket", err)
+}
+server := thrift.NewTSimpleServer2(processor, serverTransport)
+server.Serve();
+*/
+
+type TMultiplexedProcessor struct {
+	serviceProcessorMap map[string]TProcessor
+	DefaultProcessor    TProcessor
+}
+
+func NewTMultiplexedProcessor() *TMultiplexedProcessor {
+	return &TMultiplexedProcessor{
+		serviceProcessorMap: make(map[string]TProcessor),
+	}
+}
+
+// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}"
+// to TProcessorFunction for any registered processors.  If there is also a
+// DefaultProcessor, the keys for the methods on that processor will simply be
+// "{FunctionName}".  If the TMultiplexedProcessor has both a DefaultProcessor and
+// other registered processors, then the keys will be a mix of both formats.
+//
+// The implementation differs from other TProcessors in that the map returned is
+// a new map, while most TProcessors just return their internal mapping directly.
+// This means that edits to the map returned by this implementation of ProcessorMap
+// will not affect the underlying mapping within the TMultiplexedProcessor.
+func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction {
+	processorFuncMap := make(map[string]TProcessorFunction)
+	for name, processor := range t.serviceProcessorMap {
+		for method, processorFunc := range processor.ProcessorMap() {
+			processorFuncName := name + MULTIPLEXED_SEPARATOR + method
+			processorFuncMap[processorFuncName] = processorFunc
+		}
+	}
+	if t.DefaultProcessor != nil {
+		for method, processorFunc := range t.DefaultProcessor.ProcessorMap() {
+			processorFuncMap[method] = processorFunc
+		}
+	}
+	return processorFuncMap
+}
+
+// AddToProcessorMap updates the underlying TProcessor ProcessorMaps depending on
+// the format of "name".
+//
+// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}",
+// then it sets the given TProcessorFunction on the inner TProcessor with the
+// ProcessorName component using the FunctionName component.
+//
+// If "name" is just in the format "{FunctionName}", that is to say there is no
+// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor
+// configured, then it will set the given TProcessorFunction on the DefaultProcessor
+// using the given name.
+//
+// If there is not a TProcessor available for the given name, then this function
+// does nothing.  This can happen when there is no TProcessor registered for
+// the given ProcessorName or if all that is given is the FunctionName and there
+// is no DefaultProcessor set.
+func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) {
+	components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
+	if len(components) != 2 {
+		if t.DefaultProcessor != nil && len(components) == 1 {
+			t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc)
+		}
+		return
+	}
+	processorName := components[0]
+	funcName := components[1]
+	if processor, ok := t.serviceProcessorMap[processorName]; ok {
+		processor.AddToProcessorMap(funcName, processorFunc)
+	}
+}
+
+// verify that TMultiplexedProcessor implements TProcessor
+var _ TProcessor = (*TMultiplexedProcessor)(nil)
+
+func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
+	t.DefaultProcessor = processor
+}
+
+func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
+	if t.serviceProcessorMap == nil {
+		t.serviceProcessorMap = make(map[string]TProcessor)
+	}
+	t.serviceProcessorMap[name] = processor
+}
+
+func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
+	name, typeId, seqid, err := in.ReadMessageBegin(ctx)
+	if err != nil {
+		return false, NewTProtocolException(err)
+	}
+	if typeId != CALL && typeId != ONEWAY {
+		return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId))
+	}
+	// extract the service name
+	v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
+	if len(v) != 2 {
+		if t.DefaultProcessor != nil {
+			smb := NewStoredMessageProtocol(in, name, typeId, seqid)
+			return t.DefaultProcessor.Process(ctx, smb, out)
+		}
+		return false, NewTProtocolException(fmt.Errorf(
+			"Service name not found in message name: %s.  Did you forget to use a TMultiplexProtocol in your client?",
+			name,
+		))
+	}
+	actualProcessor, ok := t.serviceProcessorMap[v[0]]
+	if !ok {
+		return false, NewTProtocolException(fmt.Errorf(
+			"Service name not found: %s.  Did you forget to call registerProcessor()?",
+			v[0],
+		))
+	}
+	smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
+	return actualProcessor.Process(ctx, smb, out)
+}
+
+// storedMessageProtocol wraps a protocol and uses a stored message for ReadMessageBegin.
+type storedMessageProtocol struct {
+	TProtocol
+	name   string
+	typeId TMessageType
+	seqid  int32
+}
+
+func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
+	return &storedMessageProtocol{protocol, name, typeId, seqid}
+}
+
+func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
+	return s.name, s.typeId, s.seqid, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
new file mode 100644
index 000000000..e4512d204
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"math"
+	"strconv"
+)
+
+type Numeric interface {
+	Int64() int64
+	Int32() int32
+	Int16() int16
+	Byte() byte
+	Int() int
+	Float64() float64
+	Float32() float32
+	String() string
+	isNull() bool
+}
+
+type numeric struct {
+	iValue int64
+	dValue float64
+	sValue string
+	isNil  bool
+}
+
+var (
+	INFINITY          Numeric
+	NEGATIVE_INFINITY Numeric
+	NAN               Numeric
+	ZERO              Numeric
+	NUMERIC_NULL      Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+	if math.IsInf(dValue, 1) {
+		return INFINITY
+	}
+	if math.IsInf(dValue, -1) {
+		return NEGATIVE_INFINITY
+	}
+	if math.IsNaN(dValue) {
+		return NAN
+	}
+	iValue := int64(dValue)
+	sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(iValue, 10)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(int64(iValue), 10)
+	isNil := false
+	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+	if sValue == INFINITY.String() {
+		return INFINITY
+	}
+	if sValue == NEGATIVE_INFINITY.String() {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == NAN.String() {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	isNil := len(sValue) == 0
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+	if isNull {
+		return NewNullNumeric()
+	}
+	if sValue == JSON_INFINITY {
+		return INFINITY
+	}
+	if sValue == JSON_NEGATIVE_INFINITY {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == JSON_NAN {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+	return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+	return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+	return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+	return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+	return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+	return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+	return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+	return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+	return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+	return p.isNil
+}
+
+func init() {
+	INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+	NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+	NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+	ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+	NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
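+
+// Usage sketch (illustrative):
+//
+//	n := NewNumericFromString("3.14")
+//	_ = n.Float64() // 3.14
+//	_ = n.String()  // "3.14"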
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
new file mode 100644
index 000000000..fb564ea81
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is home to helpers that convert from various base types to
+// respective pointer types. This is necessary because Go does not permit
+// references to constants, nor can a pointer type to base type be allocated
+// and initialized in a single expression.
+//
+// E.g., this is not allowed:
+//
+//    var ip *int = &5
+//
+// But this *is* allowed:
+//
+//    func IntPtr(i int) *int { return &i }
+//    var ip *int = IntPtr(5)
+//
+// Since pointers to base types are commonplace as [optional] fields in
+// exported thrift structs, we factor such helpers here.
+///////////////////////////////////////////////////////////////////////////////
+
+func Float32Ptr(v float32) *float32 { return &v }
+func Float64Ptr(v float64) *float64 { return &v }
+func IntPtr(v int) *int             { return &v }
+func Int8Ptr(v int8) *int8          { return &v }
+func Int16Ptr(v int16) *int16       { return &v }
+func Int32Ptr(v int32) *int32       { return &v }
+func Int64Ptr(v int64) *int64       { return &v }
+func StringPtr(v string) *string    { return &v }
+func Uint32Ptr(v uint32) *uint32    { return &v }
+func Uint64Ptr(v uint64) *uint64    { return &v }
+func BoolPtr(v bool) *bool          { return &v }
+func ByteSlicePtr(v []byte) *[]byte { return &v }
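+
+// Usage sketch: setting optional fields on a generated struct (the struct
+// and field names below are hypothetical):
+//
+//	req.PageSize = Int32Ptr(25)
+//	req.Cursor = StringPtr("abc")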
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
new file mode 100644
index 000000000..245a3ccfc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+	Process(ctx context.Context, in, out TProtocol) (bool, TException)
+
+	// ProcessorMap returns a map of thrift method names to TProcessorFunctions.
+	ProcessorMap() map[string]TProcessorFunction
+
+	// AddToProcessorMap adds the given TProcessorFunction to the internal
+	// processor map at the given key.
+	//
+	// If one is already set at the given key, it will be replaced with the new
+	// TProcessorFunction.
+	AddToProcessorMap(string, TProcessorFunction)
+}
+
+type TProcessorFunction interface {
+	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
+
+// TProcessorFactory produces TProcessors. The default factory just
+// returns a singleton instance.
+type TProcessorFactory interface {
+	GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+	processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+	return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+	return p.processor
+}
+
+// TProcessorFunctionFactory produces TProcessorFunctions. The default
+// factory just returns a singleton instance.
+type TProcessorFunctionFactory interface {
+	GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+	processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+	return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+	return p.processor
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
new file mode 100644
index 000000000..0a69bd416
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+	"fmt"
+)
+
+const (
+	VERSION_MASK = 0xffff0000
+	VERSION_1    = 0x80010000
+)
+
+type TProtocol interface {
+	WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
+	WriteMessageEnd(ctx context.Context) error
+	WriteStructBegin(ctx context.Context, name string) error
+	WriteStructEnd(ctx context.Context) error
+	WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
+	WriteFieldEnd(ctx context.Context) error
+	WriteFieldStop(ctx context.Context) error
+	WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
+	WriteMapEnd(ctx context.Context) error
+	WriteListBegin(ctx context.Context, elemType TType, size int) error
+	WriteListEnd(ctx context.Context) error
+	WriteSetBegin(ctx context.Context, elemType TType, size int) error
+	WriteSetEnd(ctx context.Context) error
+	WriteBool(ctx context.Context, value bool) error
+	WriteByte(ctx context.Context, value int8) error
+	WriteI16(ctx context.Context, value int16) error
+	WriteI32(ctx context.Context, value int32) error
+	WriteI64(ctx context.Context, value int64) error
+	WriteDouble(ctx context.Context, value float64) error
+	WriteString(ctx context.Context, value string) error
+	WriteBinary(ctx context.Context, value []byte) error
+
+	ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
+	ReadMessageEnd(ctx context.Context) error
+	ReadStructBegin(ctx context.Context) (name string, err error)
+	ReadStructEnd(ctx context.Context) error
+	ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
+	ReadFieldEnd(ctx context.Context) error
+	ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
+	ReadMapEnd(ctx context.Context) error
+	ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
+	ReadListEnd(ctx context.Context) error
+	ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
+	ReadSetEnd(ctx context.Context) error
+	ReadBool(ctx context.Context) (value bool, err error)
+	ReadByte(ctx context.Context) (value int8, err error)
+	ReadI16(ctx context.Context) (value int16, err error)
+	ReadI32(ctx context.Context) (value int32, err error)
+	ReadI64(ctx context.Context) (value int64, err error)
+	ReadDouble(ctx context.Context) (value float64, err error)
+	ReadString(ctx context.Context) (value string, err error)
+	ReadBinary(ctx context.Context) (value []byte, err error)
+
+	Skip(ctx context.Context, fieldType TType) (err error)
+	Flush(ctx context.Context) (err error)
+
+	Transport() TTransport
+}
+
+// DEFAULT_RECURSION_DEPTH is the maximum recursion depth the Skip function will traverse.
+const DEFAULT_RECURSION_DEPTH = 64
+
+// SkipDefaultDepth skips over the next data element from the provided input TProtocol object.
+func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
+	return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
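+
+// Typical use (sketch): while decoding a struct, skip the value of an
+// unrecognized field instead of failing ("iprot" and "fieldTypeId" are
+// assumed from the surrounding decode loop):
+//
+//	if err := SkipDefaultDepth(ctx, iprot, fieldTypeId); err != nil {
+//		return err
+//	}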
+
+// Skip skips over the next data element from the provided input TProtocol object, up to maxDepth levels of nesting.
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
+	if maxDepth <= 0 {
+		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+	}
+
+	switch fieldType {
+	case BOOL:
+		_, err = self.ReadBool(ctx)
+		return
+	case BYTE:
+		_, err = self.ReadByte(ctx)
+		return
+	case I16:
+		_, err = self.ReadI16(ctx)
+		return
+	case I32:
+		_, err = self.ReadI32(ctx)
+		return
+	case I64:
+		_, err = self.ReadI64(ctx)
+		return
+	case DOUBLE:
+		_, err = self.ReadDouble(ctx)
+		return
+	case STRING:
+		_, err = self.ReadString(ctx)
+		return
+	case STRUCT:
+		if _, err = self.ReadStructBegin(ctx); err != nil {
+			return err
+		}
+		for {
+			_, typeId, _, err := self.ReadFieldBegin(ctx)
+			if err != nil {
+				return err
+			}
+			if typeId == STOP {
+				break
+			}
+			if err := Skip(ctx, self, typeId, maxDepth-1); err != nil {
+				return err
+			}
+			if err := self.ReadFieldEnd(ctx); err != nil {
+				return err
+			}
+		}
+		return self.ReadStructEnd(ctx)
+	case MAP:
+		keyType, valueType, size, err := self.ReadMapBegin(ctx)
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			if err := Skip(ctx, self, keyType, maxDepth-1); err != nil {
+				return err
+			}
+			if err := Skip(ctx, self, valueType, maxDepth-1); err != nil {
+				return err
+			}
+		}
+		return self.ReadMapEnd(ctx)
+	case SET:
+		elemType, size, err := self.ReadSetBegin(ctx)
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(ctx, self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadSetEnd(ctx)
+	case LIST:
+		elemType, size, err := self.ReadListBegin(ctx)
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(ctx, self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadListEnd(ctx)
+	default:
+		return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
+	}
+}
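+
+// Illustrative usage sketch (not part of the upstream Thrift sources): a
+// decoder that encounters a field ID it does not recognize can discard the
+// value, bounded by DEFAULT_RECURSION_DEPTH, and move on. Here prot is any
+// TProtocol implementation and ctx is a caller-supplied context:
+//
+//	_, typeId, _, err := prot.ReadFieldBegin(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if err := SkipDefaultDepth(ctx, prot, typeId); err != nil {
+//		return err
+//	}
+//	if err := prot.ReadFieldEnd(ctx); err != nil {
+//		return err
+//	}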
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
new file mode 100644
index 000000000..9dcf4bfd9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/base64"
+	"errors"
+)
+
+// TProtocolException is the Thrift protocol exception interface.
+type TProtocolException interface {
+	TException
+	TypeId() int
+}
+
+const (
+	UNKNOWN_PROTOCOL_EXCEPTION = 0
+	INVALID_DATA               = 1
+	NEGATIVE_SIZE              = 2
+	SIZE_LIMIT                 = 3
+	BAD_VERSION                = 4
+	NOT_IMPLEMENTED            = 5
+	DEPTH_LIMIT                = 6
+)
+
+type tProtocolException struct {
+	typeId int
+	err    error
+	msg    string
+}
+
+var _ TProtocolException = (*tProtocolException)(nil)
+
+func (tProtocolException) TExceptionType() TExceptionType {
+	return TExceptionTypeProtocol
+}
+
+func (p *tProtocolException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+	return p.msg
+}
+
+func (p *tProtocolException) Error() string {
+	return p.msg
+}
+
+func (p *tProtocolException) Unwrap() error {
+	return p.err
+}
+
+func NewTProtocolException(err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+
+	if e, ok := err.(TProtocolException); ok {
+		return e
+	}
+
+	if errors.As(err, new(base64.CorruptInputError)) {
+		return NewTProtocolExceptionWithType(INVALID_DATA, err)
+	}
+
+	return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+	return &tProtocolException{
+		typeId: errType,
+		err:    err,
+		msg:    err.Error(),
+	}
+}
+
+func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
+	return &tProtocolException{
+		typeId: err.TypeId(),
+		err:    err,
+		msg:    prepend + err.Error(),
+	}
+}
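+
+// Illustrative sketch (not part of the upstream Thrift sources): callers can
+// classify a protocol error by its type ID, e.g. to distinguish a recursion
+// depth failure from malformed input:
+//
+//	var te TProtocolException
+//	if errors.As(err, &te) && te.TypeId() == DEPTH_LIMIT {
+//		// the input was nested deeper than the allowed recursion limit
+//	}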
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
new file mode 100644
index 000000000..c40f796d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TProtocolFactory is the factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+	GetProtocol(trans TTransport) TProtocol
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
new file mode 100644
index 000000000..d884c6ac6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue for why we need the unexported typedefs.
+type responseHelperKey struct{}
+
+// TResponseHelper defines an object with a set of helper functions that can be
+// retrieved from the context object passed into server handler functions.
+//
+// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
+// from the context object.
+//
+// The zero value of TResponseHelper is valid, with all helper functions being
+// no-ops.
+type TResponseHelper struct {
+	// THeader related functions
+	*THeaderResponseHelper
+}
+
+// THeaderResponseHelper defines THeader related TResponseHelper functions.
+//
+// The zero value of *THeaderResponseHelper is valid, with all helper functions
+// being no-ops.
+type THeaderResponseHelper struct {
+	proto *THeaderProtocol
+}
+
+// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
+// underlying TProtocol.
+func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
+	if hp, ok := proto.(*THeaderProtocol); ok {
+		return &THeaderResponseHelper{
+			proto: hp,
+		}
+	}
+	return nil
+}
+
+// SetHeader sets a response header.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) SetHeader(key, value string) {
+	if h != nil && h.proto != nil {
+		h.proto.SetWriteHeader(key, value)
+	}
+}
+
+// ClearHeaders clears all the response headers previously set.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) ClearHeaders() {
+	if h != nil && h.proto != nil {
+		h.proto.ClearWriteHeaders()
+	}
+}
+
+// GetResponseHelper retrieves the TResponseHelper implementation injected into
+// the context object.
+//
+// If no helper was found in the context object, a no-op helper with ok == false
+// will be returned.
+func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
+	if v := ctx.Value(responseHelperKey{}); v != nil {
+		helper, ok = v.(TResponseHelper)
+	}
+	return
+}
+
+// SetResponseHelper injects TResponseHelper into the context object.
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
+	return context.WithValue(ctx, responseHelperKey{}, helper)
+}
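+
+// Illustrative sketch (not part of the upstream Thrift sources): a server
+// handler can set THeader response headers through the injected helper; the
+// header name and value below are placeholders, and the call is a safe no-op
+// when THeader is not in use:
+//
+//	if helper, ok := GetResponseHelper(ctx); ok {
+//		helper.SetHeader("trace-id", traceID)
+//	}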
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
new file mode 100644
index 000000000..83fdf29f5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+type RichTransport struct {
+	TTransport
+}
+
+// NewTRichTransport wraps the given TTransport to provide the TRichTransport interface.
+func NewTRichTransport(trans TTransport) *RichTransport {
+	return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+	return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+	return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+	return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (numBytes uint64) {
+	return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+	v := [1]byte{0}
+	n, err := r.Read(v[0:1])
+	if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
+		return v[0], nil
+	}
+	if n > 0 && err != nil {
+		return v[0], err
+	}
+	if err != nil {
+		return 0, err
+	}
+	return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+	v := [1]byte{c}
+	_, err := w.Write(v[0:1])
+	return err
+}
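+
+// Illustrative sketch (not part of the upstream Thrift sources): RichTransport
+// augments any TTransport with byte-level helpers in the style of
+// io.ByteReader and io.ByteWriter:
+//
+//	rt := NewTRichTransport(trans) // trans is any open TTransport
+//	b, err := rt.ReadByte()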
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
new file mode 100644
index 000000000..c44979094
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"sync"
+)
+
+type TSerializer struct {
+	Transport *TMemoryBuffer
+	Protocol  TProtocol
+}
+
+type TStruct interface {
+	Write(ctx context.Context, p TProtocol) error
+	Read(ctx context.Context, p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+	transport := NewTMemoryBufferLen(1024)
+	protocol := NewTBinaryProtocolTransport(transport)
+
+	return &TSerializer{
+		Transport: transport,
+		Protocol:  protocol,
+	}
+}
+
+func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(ctx, t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(ctx); err != nil {
+		return
+	}
+	if err = t.Transport.Flush(ctx); err != nil {
+		return
+	}
+
+	return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(ctx, t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(ctx); err != nil {
+		return
+	}
+
+	if err = t.Transport.Flush(ctx); err != nil {
+		return
+	}
+
+	b = append(b, t.Transport.Bytes()...)
+	return
+}
+
+// TSerializerPool is the thread-safe version of TSerializer; it uses a
+// resource pool of TSerializers under the hood.
+//
+// It must be initialized with either NewTSerializerPool or
+// NewTSerializerPoolSizeFactory.
+type TSerializerPool struct {
+	pool sync.Pool
+}
+
+// NewTSerializerPool creates a new TSerializerPool.
+//
+// NewTSerializer can be used as the arg here.
+func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
+	return &TSerializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return f()
+			},
+		},
+	}
+}
+
+// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
+// size and protocol factory.
+//
+// Note that the size is not the limit. The TMemoryBuffer underneath can grow
+// larger than that. It just dictates the initial size.
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
+	return &TSerializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				transport := NewTMemoryBufferLen(size)
+				protocol := factory.GetProtocol(transport)
+
+				return &TSerializer{
+					Transport: transport,
+					Protocol:  protocol,
+				}
+			},
+		},
+	}
+}
+
+func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
+	s := t.pool.Get().(*TSerializer)
+	defer t.pool.Put(s)
+	return s.WriteString(ctx, msg)
+}
+
+func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
+	s := t.pool.Get().(*TSerializer)
+	defer t.pool.Put(s)
+	return s.Write(ctx, msg)
+}
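+
+// Illustrative sketch (not part of the upstream Thrift sources): TSerializer
+// itself is not safe for concurrent use, so shared serialization paths would
+// typically go through the pool; msg is any value implementing TStruct:
+//
+//	pool := NewTSerializerPool(NewTSerializer)
+//	payload, err := pool.Write(ctx, msg)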
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
new file mode 100644
index 000000000..f813fa353
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+type TServer interface {
+	ProcessorFactory() TProcessorFactory
+	ServerTransport() TServerTransport
+	InputTransportFactory() TTransportFactory
+	OutputTransportFactory() TTransportFactory
+	InputProtocolFactory() TProtocolFactory
+	OutputProtocolFactory() TProtocolFactory
+
+	// Starts the server
+	Serve() error
+	// Stops the server. This is optional on a per-implementation basis. Not
+	// all servers are required to be cleanly stoppable.
+	Stop() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
new file mode 100644
index 000000000..7dd24ae36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+type TServerSocket struct {
+	listener      net.Listener
+	addr          net.Addr
+	clientTimeout time.Duration
+
+	// Protects the interrupted value to make it thread safe.
+	mu          sync.RWMutex
+	interrupted bool
+}
+
+func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
+	return NewTServerSocketTimeout(listenAddr, 0)
+}
+
+func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
+	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
+	if err != nil {
+		return nil, err
+	}
+	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
+}
+
+// NewTServerSocketFromAddrTimeout creates a TServerSocket from a net.Addr.
+func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket {
+	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}
+}
+
+func (p *TServerSocket) Listen() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.IsListening() {
+		return nil
+	}
+	l, err := net.Listen(p.addr.Network(), p.addr.String())
+	if err != nil {
+		return err
+	}
+	p.listener = l
+	return nil
+}
+
+func (p *TServerSocket) Accept() (TTransport, error) {
+	p.mu.RLock()
+	interrupted := p.interrupted
+	p.mu.RUnlock()
+
+	if interrupted {
+		return nil, errTransportInterrupted
+	}
+
+	p.mu.Lock()
+	listener := p.listener
+	p.mu.Unlock()
+	if listener == nil {
+		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
+	}
+
+	conn, err := listener.Accept()
+	if err != nil {
+		return nil, NewTTransportExceptionFromError(err)
+	}
+	return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
+}
+
+// IsListening checks whether the socket is listening.
+func (p *TServerSocket) IsListening() bool {
+	return p.listener != nil
+}
+
+// Open opens the server socket, creating the underlying listener if necessary.
+func (p *TServerSocket) Open() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.IsListening() {
+		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
+	}
+	l, err := net.Listen(p.addr.Network(), p.addr.String())
+	if err != nil {
+		return err
+	}
+	p.listener = l
+	return nil
+}
+
+func (p *TServerSocket) Addr() net.Addr {
+	if p.listener != nil {
+		return p.listener.Addr()
+	}
+	return p.addr
+}
+
+func (p *TServerSocket) Close() error {
+	var err error
+	p.mu.Lock()
+	if p.IsListening() {
+		err = p.listener.Close()
+		p.listener = nil
+	}
+	p.mu.Unlock()
+	return err
+}
+
+func (p *TServerSocket) Interrupt() error {
+	p.mu.Lock()
+	p.interrupted = true
+	p.mu.Unlock()
+	p.Close()
+
+	return nil
+}
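+
+// Illustrative sketch (not part of the upstream Thrift sources): a minimal
+// accept loop over a TServerSocket; the address is a placeholder and handle
+// is a hypothetical per-connection worker:
+//
+//	sock, err := NewTServerSocket("127.0.0.1:9090")
+//	if err != nil {
+//		return err
+//	}
+//	if err := sock.Listen(); err != nil {
+//		return err
+//	}
+//	for {
+//		trans, err := sock.Accept()
+//		if err != nil {
+//			break // e.g. after Interrupt closes the listener
+//		}
+//		go handle(trans)
+//	}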
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
new file mode 100644
index 000000000..51c40b64a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TServerTransport is the server transport: an object which provides client transports.
+type TServerTransport interface {
+	Listen() error
+	Accept() (TTransport, error)
+	Close() error
+
+	// Optional method implementation. This signals to the server transport
+	// that it should break out of any accept() or listen() that it is currently
+	// blocked on. This method, if implemented, MUST be thread safe, as it may
+	// be called from a different thread context than the other TServerTransport
+	// methods.
+	Interrupt() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
new file mode 100644
index 000000000..d1a815453
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
@@ -0,0 +1,1373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+)
+
+type _ParseContext int
+
+const (
+	_CONTEXT_INVALID              _ParseContext = iota
+	_CONTEXT_IN_TOPLEVEL                        // 1
+	_CONTEXT_IN_LIST_FIRST                      // 2
+	_CONTEXT_IN_LIST                            // 3
+	_CONTEXT_IN_OBJECT_FIRST                    // 4
+	_CONTEXT_IN_OBJECT_NEXT_KEY                 // 5
+	_CONTEXT_IN_OBJECT_NEXT_VALUE               // 6
+)
+
+func (p _ParseContext) String() string {
+	switch p {
+	case _CONTEXT_IN_TOPLEVEL:
+		return "TOPLEVEL"
+	case _CONTEXT_IN_LIST_FIRST:
+		return "LIST-FIRST"
+	case _CONTEXT_IN_LIST:
+		return "LIST"
+	case _CONTEXT_IN_OBJECT_FIRST:
+		return "OBJECT-FIRST"
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		return "OBJECT-NEXT-KEY"
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		return "OBJECT-NEXT-VALUE"
+	}
+	return "UNKNOWN-PARSE-CONTEXT"
+}
+
+type jsonContextStack []_ParseContext
+
+func (s *jsonContextStack) push(v _ParseContext) {
+	*s = append(*s, v)
+}
+
+func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
+	l := len(s)
+	if l <= 0 {
+		return
+	}
+	return s[l-1], true
+}
+
+func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
+	l := len(*s)
+	if l <= 0 {
+		return
+	}
+	v = (*s)[l-1]
+	*s = (*s)[0 : l-1]
+	return v, true
+}
+
+var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
+
+// Simple JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages.  It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+	trans TTransport
+
+	parseContextStack jsonContextStack
+	dumpContext       jsonContextStack
+
+	writer *bufio.Writer
+	reader *bufio.Reader
+}
+
+// NewTSimpleJSONProtocol creates a TSimpleJSONProtocol wrapping the given transport.
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+	v := &TSimpleJSONProtocol{trans: t,
+		writer: bufio.NewWriter(t),
+		reader: bufio.NewReader(t),
+	}
+	v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
+	v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
+	return v
+}
+
+// TSimpleJSONProtocolFactory is a factory for TSimpleJSONProtocol instances.
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+	return &TSimpleJSONProtocolFactory{}
+}
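+
+// Illustrative sketch (not part of the upstream Thrift sources): writing a
+// value through the simple JSON protocol into an in-memory transport
+// (TMemoryBuffer is defined elsewhere in this package):
+//
+//	trans := NewTMemoryBufferLen(64)
+//	prot := NewTSimpleJSONProtocolFactory().GetProtocol(trans)
+//	if err := prot.WriteString(ctx, "hello"); err != nil {
+//		return err
+//	}
+//	_ = prot.Flush(ctx) // trans now holds the JSON document "hello"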
+
+var (
+	JSON_COMMA                   []byte
+	JSON_COLON                   []byte
+	JSON_LBRACE                  []byte
+	JSON_RBRACE                  []byte
+	JSON_LBRACKET                []byte
+	JSON_RBRACKET                []byte
+	JSON_QUOTE                   byte
+	JSON_QUOTE_BYTES             []byte
+	JSON_NULL                    []byte
+	JSON_TRUE                    []byte
+	JSON_FALSE                   []byte
+	JSON_INFINITY                string
+	JSON_NEGATIVE_INFINITY       string
+	JSON_NAN                     string
+	JSON_INFINITY_BYTES          []byte
+	JSON_NEGATIVE_INFINITY_BYTES []byte
+	JSON_NAN_BYTES               []byte
+	json_nonbase_map_elem_bytes  []byte
+)
+
+func init() {
+	JSON_COMMA = []byte{','}
+	JSON_COLON = []byte{':'}
+	JSON_LBRACE = []byte{'{'}
+	JSON_RBRACE = []byte{'}'}
+	JSON_LBRACKET = []byte{'['}
+	JSON_RBRACKET = []byte{']'}
+	JSON_QUOTE = '"'
+	JSON_QUOTE_BYTES = []byte{'"'}
+	JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+	JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+	JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+	JSON_INFINITY = "Infinity"
+	JSON_NEGATIVE_INFINITY = "-Infinity"
+	JSON_NAN = "NaN"
+	JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+	json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+	b, _ := json.Marshal(s)
+	s1 := string(b)
+	return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+	s1 := new(string)
+	err := json.Unmarshal([]byte(s), s1)
+	return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+	p.resetContextStack() // THRIFT-3735
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteString(ctx, name); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(typeId)); e != nil {
+		return e
+	}
+	if e := p.WriteI32(ctx, seqId); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	if e := p.WriteString(ctx, name); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(keyType)); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(valueType)); e != nil {
+		return e
+	}
+	return p.WriteI32(ctx, int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
+	return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
+	return p.WriteI32(ctx, int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
+	return p.WriteI32(ctx, int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
+	// The JSON library only takes in a string, not an arbitrary byte
+	// array. To transmit bytes efficiently as a valid JSON string, we
+	// base64-encode them, which avoids excessive escaping/quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
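+
+// For example (not part of the upstream Thrift sources): WriteBinary with the
+// two bytes {0xde, 0xad} emits the JSON string "3q0=", the base64 encoding of
+// those bytes, to the underlying transport.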
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	p.resetContextStack() // THRIFT-3735
+	if isNull, err := p.ParseListBegin(); isNull || err != nil {
+		return name, typeId, seqId, err
+	}
+	if name, err = p.ReadString(ctx); err != nil {
+		return name, typeId, seqId, err
+	}
+	bTypeId, err := p.ReadByte(ctx)
+	typeId = TMessageType(bTypeId)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if seqId, err = p.ReadI32(ctx); err != nil {
+		return name, typeId, seqId, err
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	_, err = p.ParseObjectStart()
+	return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return "", STOP, 0, err
+	}
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 {
+		switch b[0] {
+		case JSON_RBRACE[0]:
+			return "", STOP, 0, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			name, err := p.ParseStringBody()
+			// simplejson is not meant to be read back into thrift
+			// - see http://wiki.apache.org/thrift/ThriftUsageJava
+			// - use JSON instead
+			if err != nil {
+				return name, STOP, 0, err
+			}
+			return name, STOP, -1, p.ParsePostValue()
+		}
+		e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+		return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, VOID, 0, e
+	}
+
+	// read keyType
+	bKeyType, e := p.ReadByte(ctx)
+	keyType = TType(bKeyType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read valueType
+	bValueType, e := p.ReadByte(ctx)
+	valueType = TType(bValueType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read size
+	iSize, err := p.ReadI64(ctx)
+	size = int(iSize)
+	return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
+	var value bool
+
+	if err := p.ParsePreValue(); err != nil {
+		return value, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 {
+		switch f[0] {
+		case JSON_TRUE[0]:
+			b := make([]byte, len(JSON_TRUE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_TRUE) {
+				value = true
+			} else {
+				e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_FALSE[0]:
+			b := make([]byte, len(JSON_FALSE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_FALSE) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_NULL[0]:
+			b := make([]byte, len(JSON_NULL))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_NULL) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		default:
+			e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+			return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.ReadI64(ctx)
+	return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
+	v, err := p.ReadI64(ctx)
+	return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
+	v, err := p.ReadI64(ctx)
+	return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
+	v, _, err := p.ParseI64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
+	v, _, err := p.ParseF64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
+	var v string
+	if err := p.ParsePreValue(); err != nil {
+		return v, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseStringBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+	var v []byte
+	if err := p.ParsePreValue(); err != nil {
+		return nil, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseBase64EncodedBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+	return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if _, e := p.write(JSON_COMMA); e != nil {
+			return NewTProtocolException(e)
+		}
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if _, e := p.write(JSON_COLON); e != nil {
+			return NewTProtocolException(e)
+		}
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_LIST)
+	case _CONTEXT_IN_OBJECT_FIRST:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if value {
+		v = string(JSON_TRUE)
+	} else {
+		v = string(JSON_FALSE)
+	}
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_NULL); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if math.IsNaN(value) {
+		v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+	} else if math.IsInf(value, 1) {
+		v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+	} else if math.IsInf(value, -1) {
+		v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+	} else {
+		cxt, ok := p.dumpContext.peek()
+		if !ok {
+			return errEmptyJSONContextStack
+		}
+		v = strconv.FormatFloat(value, 'g', -1, 64)
+		switch cxt {
+		case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+			v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+		}
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	v := strconv.FormatInt(value, 10)
+	switch cxt {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if e := p.OutputStringData(jsonQuote(s)); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+	_, e := p.write([]byte(s))
+	return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+	if _, e := p.write(JSON_RBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	_, ok := p.dumpContext.pop()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+	if _, e := p.write(JSON_RBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	_, ok := p.dumpContext.pop()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(elemType)); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt, ok := p.parseContextStack.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	b, _ := p.reader.Peek(1)
+	switch cxt {
+	case _CONTEXT_IN_LIST:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACKET[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACE[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_COLON[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt, ok := p.parseContextStack.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_LIST)
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+	for {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return nil
+		}
+		switch b[0] {
+		case ' ', '\r', '\n', '\t':
+			p.reader.ReadByte()
+			continue
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+		if !ok {
+			e := fmt.Errorf("Unable to parse as JSON string %s", string(JSON_QUOTE)+line)
+			return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+		return v, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	str := string(JSON_QUOTE) + line + s
+	v, ok := jsonUnquote(str)
+	if !ok {
+		e := fmt.Errorf("Unable to parse as JSON string %s", str)
+		return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		return line, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	v := line + s
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+	line, err := p.reader.ReadBytes(JSON_QUOTE)
+	if err != nil {
+		return line, NewTProtocolException(err)
+	}
+	line2 := line[0 : len(line)-1]
+	l := len(line2)
+	if (l % 4) != 0 {
+		pad := 4 - (l % 4)
+		fill := [...]byte{'=', '=', '='}
+		line2 = append(line2, fill[:pad]...)
+		l = len(line2)
+	}
+	output := make([]byte, base64.StdEncoding.DecodedLen(l))
+	n, err := base64.StdEncoding.Decode(output, line2)
+	return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value int64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Int64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value float64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Float64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return false, err
+	}
+	b, err := p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+		p.reader.ReadByte()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
+		return false, nil
+	} else if p.safePeekContains(JSON_NULL) {
+		return true, nil
+	}
+	e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+	return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt, _ := p.parseContextStack.peek()
+	if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+		e := fmt.Errorf("Expected to be in the Object Context, but was in context %d", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACE[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', '}':
+		}
+	}
+	p.parseContextStack.pop()
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+	if e := p.ParsePreValue(); e != nil {
+		return false, e
+	}
+	b, err := p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+		p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
+		p.reader.ReadByte()
+		isNull = false
+	} else if p.safePeekContains(JSON_NULL) {
+		isNull = true
+	} else {
+		err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+	}
+	return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	bElemType, _, err := p.ParseI64()
+	elemType = TType(bElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, _, err2 := p.ParseI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt, _ := p.parseContextStack.peek()
+	if cxt != _CONTEXT_IN_LIST {
+		e := fmt.Errorf("Expected to be in the List Context, but was in context %d", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACKET[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+		}
+	}
+	p.parseContextStack.pop()
+	if cxt, ok := p.parseContextStack.peek(); !ok {
+		return errEmptyJSONContextStack
+	} else if cxt == _CONTEXT_IN_TOPLEVEL {
+		return nil
+	}
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+	e := p.readNonSignificantWhitespace()
+	if e != nil {
+		return nil, VOID, NewTProtocolException(e)
+	}
+	b, e := p.reader.Peek(1)
+	if len(b) > 0 {
+		c := b[0]
+		switch c {
+		case JSON_NULL[0]:
+			buf := make([]byte, len(JSON_NULL))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return nil, VOID, NewTProtocolException(e)
+			}
+			if string(JSON_NULL) != string(buf) {
+				e = mismatch(string(JSON_NULL), string(buf))
+				return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return nil, VOID, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			v, e := p.ParseStringBody()
+			if e != nil {
+				return v, UTF8, NewTProtocolException(e)
+			}
+			if v == JSON_INFINITY {
+				return INFINITY, DOUBLE, nil
+			} else if v == JSON_NEGATIVE_INFINITY {
+				return NEGATIVE_INFINITY, DOUBLE, nil
+			} else if v == JSON_NAN {
+				return NAN, DOUBLE, nil
+			}
+			return v, UTF8, nil
+		case JSON_TRUE[0]:
+			buf := make([]byte, len(JSON_TRUE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return true, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_TRUE) != string(buf) {
+				e := mismatch(string(JSON_TRUE), string(buf))
+				return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return true, BOOL, nil
+		case JSON_FALSE[0]:
+			buf := make([]byte, len(JSON_FALSE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return false, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_FALSE) != string(buf) {
+				e := mismatch(string(JSON_FALSE), string(buf))
+				return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return false, BOOL, nil
+		case JSON_LBRACKET[0]:
+			_, e := p.reader.ReadByte()
+			return make([]interface{}, 0), LIST, NewTProtocolException(e)
+		case JSON_LBRACE[0]:
+			_, e := p.reader.ReadByte()
+			return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+			// assume numeric
+			v, e := p.readNumeric()
+			return v, DOUBLE, e
+		default:
+			e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+			return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	e = errors.New("Cannot read a single element while parsing JSON.")
+	return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+	cont := true
+	for cont {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return false, nil
+		}
+		switch b[0] {
+		default:
+			return false, nil
+		case JSON_NULL[0]:
+			cont = false
+		case ' ', '\n', '\r', '\t':
+			p.reader.ReadByte()
+		}
+	}
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		return true, nil
+	}
+	return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 && b[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+	}
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+	isNull, err := p.readIfNull()
+	if isNull || err != nil {
+		return NUMERIC_NULL, err
+	}
+	hasDecimalPoint := false
+	nextCanBeSign := true
+	hasE := false
+	MAX_LEN := 40
+	buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+	continueFor := true
+	inQuotes := false
+	for continueFor {
+		c, err := p.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return NUMERIC_NULL, NewTProtocolException(err)
+		}
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case '.':
+			if hasDecimalPoint {
+				e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasDecimalPoint, nextCanBeSign = true, false
+		case 'e', 'E':
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasE, nextCanBeSign = true, true
+		case '-', '+':
+			if !nextCanBeSign {
+				e := fmt.Errorf("Unexpected sign character within number")
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+			p.reader.UnreadByte()
+			continueFor = false
+		case JSON_NAN[0]:
+			if buf.Len() == 0 {
+				buffer := make([]byte, len(JSON_NAN))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NAN != string(buffer) {
+					e := mismatch(JSON_NAN, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NAN, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_INFINITY[0]:
+			if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+				buffer := make([]byte, len(JSON_INFINITY))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_INFINITY != string(buffer) {
+					e := mismatch(JSON_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return INFINITY, nil
+			} else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+				buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+				buffer[0] = JSON_NEGATIVE_INFINITY[0]
+				buffer[1] = c
+				_, e := p.reader.Read(buffer[2:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NEGATIVE_INFINITY != string(buffer) {
+					e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NEGATIVE_INFINITY, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_QUOTE:
+			if !inQuotes {
+				inQuotes = true
+			} else {
+				// Note: this break exits the switch, not the loop. The
+				// closing quote is consumed here, and the next iteration
+				// typically hits a delimiter that terminates the number.
+				break
+			}
+		default:
+			e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+			return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	if buf.Len() == 0 {
+		e := fmt.Errorf("Unable to parse number from empty string ''")
+		return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return NewNumericFromJSONString(buf.String(), false), nil
+}
+
+// safePeekContains safely peeks into the buffer, reading only as many bytes
+// as necessary to check whether the upcoming input matches b.
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+	for i := 0; i < len(b); i++ {
+		a, _ := p.reader.Peek(i + 1)
+		if len(a) < (i+1) || a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// resetContextStack resets the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+	p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+	p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+	n, err := p.writer.Write(b)
+	if err != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+	}
+	return n, err
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+}
+
+var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
new file mode 100644
index 000000000..563cbfc69
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ErrAbandonRequest is a special error server handler implementations can
+// return to indicate that the request has been abandoned.
+//
+// TSimpleServer will check for this error, and close the client connection
+// instead of writing the response/error back to the client.
+//
+// It shall only be used when the server handler implementation knows that the
+// client has already abandoned the request (by checking that the passed-in
+// context is already canceled, for example).
+var ErrAbandonRequest = errors.New("request abandoned")
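+
+// A minimal usage sketch, assuming a user-defined handler type outside this
+// package (myHandler and DoWork are illustrative, not part of this library):
+//
+//     func (h *myHandler) DoWork(ctx context.Context) error {
+//         if ctx.Err() != nil {
+//             // The client already went away; tell TSimpleServer to
+//             // close the connection instead of writing a response.
+//             return thrift.ErrAbandonRequest
+//         }
+//         // ... normal request handling ...
+//         return nil
+//     }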
+
+// ServerConnectivityCheckInterval defines the ticker interval used by
+// the connectivity check in thrift-compiled TProcessorFunc implementations.
+//
+// It's defined as a variable instead of a constant so that thrift server
+// implementations can change its value to control the behavior.
+//
+// If it's changed to <=0, the feature will be disabled.
+var ServerConnectivityCheckInterval = time.Millisecond * 5
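+
+// For example, a server binary that wants to disable the connectivity check
+// entirely could set (illustrative):
+//
+//     thrift.ServerConnectivityCheckInterval = 0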
+
+/*
+ * This is not a typical TSimpleServer as it is not blocked after accepting a socket.
+ * It is more like a TThreadedServer that can handle different connections in different goroutines.
+ * This will work as long as Go clients implement something like a connection pool on their side.
+ */
+type TSimpleServer struct {
+	closed int32
+	wg     sync.WaitGroup
+	mu     sync.Mutex
+
+	processorFactory       TProcessorFactory
+	serverTransport        TServerTransport
+	inputTransportFactory  TTransportFactory
+	outputTransportFactory TTransportFactory
+	inputProtocolFactory   TProtocolFactory
+	outputProtocolFactory  TProtocolFactory
+
+	// Headers to auto forward in THeaderProtocol
+	forwardHeaders []string
+
+	logger Logger
+}
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+		serverTransport,
+		transportFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+		serverTransport,
+		inputTransportFactory,
+		outputTransportFactory,
+		inputProtocolFactory,
+		outputProtocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		NewTTransportFactory(),
+		NewTTransportFactory(),
+		NewTBinaryProtocolFactoryDefault(),
+		NewTBinaryProtocolFactoryDefault(),
+	)
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		transportFactory,
+		transportFactory,
+		protocolFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return &TSimpleServer{
+		processorFactory:       processorFactory,
+		serverTransport:        serverTransport,
+		inputTransportFactory:  inputTransportFactory,
+		outputTransportFactory: outputTransportFactory,
+		inputProtocolFactory:   inputProtocolFactory,
+		outputProtocolFactory:  outputProtocolFactory,
+	}
+}
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+	return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+	return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+	return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+	return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+	return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+	return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+	return p.serverTransport.Listen()
+}
+
+// SetForwardHeaders sets the list of header keys that will be auto forwarded
+// while using THeaderProtocol.
+//
+// "forward" means that when the server is also a client to other upstream
+// thrift servers, the context object user gets in the processor functions will
+// have both read and write headers set, with write headers being forwarded.
+// Users can always override the write headers by calling SetWriteHeaderList
+// before calling thrift client functions.
+func (p *TSimpleServer) SetForwardHeaders(headers []string) {
+	size := len(headers)
+	if size == 0 {
+		p.forwardHeaders = nil
+		return
+	}
+
+	keys := make([]string, size)
+	copy(keys, headers)
+	p.forwardHeaders = keys
+}
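+
+// A usage sketch, assuming an already constructed processor and server
+// transport (the header keys shown are illustrative):
+//
+//     server := thrift.NewTSimpleServer2(processor, serverTransport)
+//     server.SetForwardHeaders([]string{"request-id", "trace-id"})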
+
+// SetLogger sets the logger used by this TSimpleServer.
+//
+// If no logger was set before Serve is called, a default logger using the
+// standard log library will be used.
+func (p *TSimpleServer) SetLogger(logger Logger) {
+	p.logger = logger
+}
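+
+// A sketch of routing server logs through the standard library logger,
+// assuming Logger is satisfied by a plain func(msg string):
+//
+//     server.SetLogger(func(msg string) {
+//         log.Printf("thrift server: %s", msg)
+//     })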
+
+func (p *TSimpleServer) innerAccept() (int32, error) {
+	client, err := p.serverTransport.Accept()
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	closed := atomic.LoadInt32(&p.closed)
+	if closed != 0 {
+		return closed, nil
+	}
+	if err != nil {
+		return 0, err
+	}
+	if client != nil {
+		p.wg.Add(1)
+		go func() {
+			defer p.wg.Done()
+			if err := p.processRequests(client); err != nil {
+				p.logger(fmt.Sprintf("error processing request: %v", err))
+			}
+		}()
+	}
+	return 0, nil
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+	for {
+		closed, err := p.innerAccept()
+		if err != nil {
+			return err
+		}
+		if closed != 0 {
+			return nil
+		}
+	}
+}
+
+func (p *TSimpleServer) Serve() error {
+	p.logger = fallbackLogger(p.logger)
+
+	err := p.Listen()
+	if err != nil {
+		return err
+	}
+	p.AcceptLoop()
+	return nil
+}
+
+func (p *TSimpleServer) Stop() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if atomic.LoadInt32(&p.closed) != 0 {
+		return nil
+	}
+	atomic.StoreInt32(&p.closed, 1)
+	p.serverTransport.Interrupt()
+	p.wg.Wait()
+	return nil
+}
+
+// If err is actually EOF, return nil, otherwise return err as-is.
+func treatEOFErrorsAsNil(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, io.EOF) {
+		return nil
+	}
+	var te TTransportException
+	if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
+		return nil
+	}
+	return err
+}
+
+func (p *TSimpleServer) processRequests(client TTransport) (err error) {
+	defer func() {
+		err = treatEOFErrorsAsNil(err)
+	}()
+
+	processor := p.processorFactory.GetProcessor(client)
+	inputTransport, err := p.inputTransportFactory.GetTransport(client)
+	if err != nil {
+		return err
+	}
+	inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
+	var outputTransport TTransport
+	var outputProtocol TProtocol
+
+	// for THeaderProtocol, we must use the same protocol instance for
+	// input and output so that the response is in the same dialect that
+	// the server detected the request was in.
+	headerProtocol, ok := inputProtocol.(*THeaderProtocol)
+	if ok {
+		outputProtocol = inputProtocol
+	} else {
+		oTrans, err := p.outputTransportFactory.GetTransport(client)
+		if err != nil {
+			return err
+		}
+		outputTransport = oTrans
+		outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
+	}
+
+	if inputTransport != nil {
+		defer inputTransport.Close()
+	}
+	if outputTransport != nil {
+		defer outputTransport.Close()
+	}
+	for {
+		if atomic.LoadInt32(&p.closed) != 0 {
+			return nil
+		}
+
+		ctx := SetResponseHelper(
+			defaultCtx,
+			TResponseHelper{
+				THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
+			},
+		)
+		if headerProtocol != nil {
+			// We need to call ReadFrame here, otherwise we won't
+			// get any headers on the AddReadTHeaderToContext call.
+			//
+			// ReadFrame is safe to be called multiple times so it
+			// won't break when it's called again later when we
+			// actually start to read the message.
+			if err := headerProtocol.ReadFrame(ctx); err != nil {
+				return err
+			}
+			ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
+			ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
+		}
+
+		ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
+		if errors.Is(err, ErrAbandonRequest) {
+			return client.Close()
+		}
+		if errors.As(err, new(TTransportException)) && err != nil {
+			return err
+		}
+		var tae TApplicationException
+		if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
+			continue
+		}
+		if !ok {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
new file mode 100644
index 000000000..e911bf166
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"net"
+	"time"
+)
+
+type TSocket struct {
+	conn *socketConn
+	addr net.Addr
+	cfg  *TConfiguration
+
+	connectTimeout time.Duration
+	socketTimeout  time.Duration
+}
+
+// Deprecated: Use NewTSocketConf instead.
+func NewTSocket(hostPort string) (*TSocket, error) {
+	return NewTSocketConf(hostPort, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port.
+//
+// Example:
+//
+//     trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{
+//         ConnectTimeout: time.Second, // Use 0 for no timeout
+//         SocketTimeout:  time.Second, // Use 0 for no timeout
+//     })
+func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) {
+	addr, err := net.ResolveTCPAddr("tcp", hostPort)
+	if err != nil {
+		return nil, err
+	}
+	return NewTSocketFromAddrConf(addr, conf), nil
+}
+
+// Deprecated: Use NewTSocketConf instead.
+func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) {
+	return NewTSocketConf(hostPort, &TConfiguration{
+		ConnectTimeout: connTimeout,
+		SocketTimeout:  soTimeout,
+
+		noPropagation: true,
+	})
+}
+
+// NewTSocketFromAddrConf creates a TSocket from a net.Addr
+func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket {
+	return &TSocket{
+		addr: addr,
+		cfg:  conf,
+	}
+}
+
+// Deprecated: Use NewTSocketFromAddrConf instead.
+func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket {
+	return NewTSocketFromAddrConf(addr, &TConfiguration{
+		ConnectTimeout: connTimeout,
+		SocketTimeout:  soTimeout,
+
+		noPropagation: true,
+	})
+}
+
+// NewTSocketFromConnConf creates a TSocket from an existing net.Conn.
+func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket {
+	return &TSocket{
+		conn: wrapSocketConn(conn),
+		addr: conn.RemoteAddr(),
+		cfg:  conf,
+	}
+}
+
+// Deprecated: Use NewTSocketFromConnConf instead.
+func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket {
+	return NewTSocketFromConnConf(conn, &TConfiguration{
+		SocketTimeout: socketTimeout,
+
+		noPropagation: true,
+	})
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+//
+// It can be used to set connect and socket timeouts.
+func (p *TSocket) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
+}
+
+// Sets the connect timeout
+func (p *TSocket) SetConnTimeout(timeout time.Duration) error {
+	if p.cfg == nil {
+		p.cfg = &TConfiguration{
+			noPropagation: true,
+		}
+	}
+	p.cfg.ConnectTimeout = timeout
+	return nil
+}
+
+// Sets the socket timeout
+func (p *TSocket) SetSocketTimeout(timeout time.Duration) error {
+	if p.cfg == nil {
+		p.cfg = &TConfiguration{
+			noPropagation: true,
+		}
+	}
+	p.cfg.SocketTimeout = timeout
+	return nil
+}
+
+func (p *TSocket) pushDeadline(read, write bool) {
+	var t time.Time
+	if timeout := p.cfg.GetSocketTimeout(); timeout > 0 {
+		t = time.Now().Add(time.Duration(timeout))
+	}
+	if read && write {
+		p.conn.SetDeadline(t)
+	} else if read {
+		p.conn.SetReadDeadline(t)
+	} else if write {
+		p.conn.SetWriteDeadline(t)
+	}
+}
+
+// Connects the socket, creating a new socket object if necessary.
+func (p *TSocket) Open() error {
+	if p.conn.isValid() {
+		return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
+	}
+	if p.addr == nil {
+		return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
+	}
+	if len(p.addr.Network()) == 0 {
+		return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
+	}
+	if len(p.addr.String()) == 0 {
+		return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
+	}
+	var err error
+	if p.conn, err = createSocketConnFromReturn(net.DialTimeout(
+		p.addr.Network(),
+		p.addr.String(),
+		p.cfg.GetConnectTimeout(),
+	)); err != nil {
+		return NewTTransportException(NOT_OPEN, err.Error())
+	}
+	return nil
+}
+
+// Retrieve the underlying net.Conn
+func (p *TSocket) Conn() net.Conn {
+	return p.conn
+}
+
+// Returns true if the connection is open
+func (p *TSocket) IsOpen() bool {
+	return p.conn.IsOpen()
+}
+
+// Closes the socket.
+func (p *TSocket) Close() error {
+	// Close the socket
+	if p.conn != nil {
+		err := p.conn.Close()
+		if err != nil {
+			return err
+		}
+		p.conn = nil
+	}
+	return nil
+}
+
+// Returns the remote address of the socket.
+func (p *TSocket) Addr() net.Addr {
+	return p.addr
+}
+
+func (p *TSocket) Read(buf []byte) (int, error) {
+	if !p.conn.isValid() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(true, false)
+	// NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
+	// p.pushDeadline and p.conn.Read could cause the deadline set inside
+	// p.pushDeadline to be reset, and thus must be avoided.
+	n, err := p.conn.Read(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TSocket) Write(buf []byte) (int, error) {
+	if !p.conn.isValid() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(false, true)
+	return p.conn.Write(buf)
+}
+
+func (p *TSocket) Flush(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSocket) Interrupt() error {
+	if !p.conn.isValid() {
+		return nil
+	}
+	return p.conn.Close()
+}
+
+func (p *TSocket) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
+
+var _ TConfigurationSetter = (*TSocket)(nil)
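+
+// End-to-end usage sketch for TSocket (address and payload are illustrative;
+// passing a nil *TConfiguration relies on its getters being nil-safe):
+//
+//     trans, err := thrift.NewTSocketConf("localhost:9090", nil)
+//     if err != nil {
+//         // handle resolution error
+//     }
+//     if err := trans.Open(); err != nil {
+//         // handle connect error
+//     }
+//     defer trans.Close()
+//     _, err = trans.Write([]byte("payload"))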
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
new file mode 100644
index 000000000..c1cc30c6c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"net"
+)
+
+// socketConn is a wrapped net.Conn that tries to do connectivity check.
+type socketConn struct {
+	net.Conn
+
+	buffer [1]byte
+}
+
+var _ net.Conn = (*socketConn)(nil)
+
+// createSocketConnFromReturn is syntactic sugar to help create a socketConn
+// from the return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc.
+func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) {
+	if err != nil {
+		return nil, err
+	}
+	return &socketConn{
+		Conn: conn,
+	}, nil
+}
+
+// wrapSocketConn wraps an existing net.Conn into *socketConn.
+func wrapSocketConn(conn net.Conn) *socketConn {
+	// In case conn is already wrapped,
+	// return it as-is and avoid double wrapping.
+	if sc, ok := conn.(*socketConn); ok {
+		return sc
+	}
+
+	return &socketConn{
+		Conn: conn,
+	}
+}
+
+// isValid checks whether there's a valid connection.
+//
+// It's nil safe, and returns false if sc itself is nil, or if the underlying
+// connection is nil.
+//
+// It's the same as the previous implementation of TSocket.IsOpen and
+// TSSLSocket.IsOpen before we added the connectivity check.
+func (sc *socketConn) isValid() bool {
+	return sc != nil && sc.Conn != nil
+}
+
+// IsOpen checks whether the connection is open.
+//
+// It's nil safe, and returns false if sc itself is nil, or if the underlying
+// connection is nil.
+//
+// Otherwise, it tries to do a connectivity check and returns the result.
+//
+// It also has the side effect of resetting the previously set read deadline on
+// the socket. As a result, it shouldn't be called between setting the read
+// deadline and doing the actual read.
+func (sc *socketConn) IsOpen() bool {
+	if !sc.isValid() {
+		return false
+	}
+	return sc.checkConn() == nil
+}
+
+// Read implements io.Reader.
+//
+// On Windows, it behaves the same as the underlying net.Conn.Read.
+//
+// On non-Windows, it treats len(p) == 0 as a connectivity check instead of a
+// readability check. That is, instead of blocking until there's something to
+// read (a readability check) or always returning (0, nil) (the default
+// behavior of Go's stdlib implementation on non-Windows), it never blocks,
+// and returns an error if the connection is lost.
+func (sc *socketConn) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return 0, sc.read0()
+	}
+
+	return sc.Conn.Read(p)
+}
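+
+// Internal usage sketch: on non-Windows platforms a zero-length read doubles
+// as a connectivity probe (illustrative):
+//
+//     if _, err := sc.Read(nil); err != nil {
+//         // the peer closed the connection
+//     }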
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
new file mode 100644
index 000000000..f5fab3ab6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
@@ -0,0 +1,83 @@
+// +build !windows
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+	"syscall"
+	"time"
+)
+
+// We rely on this variable to be the zero time,
+// but define it as a global variable to avoid repetitive allocations.
+// Please DO NOT mutate this variable in any way.
+var zeroTime time.Time
+
+func (sc *socketConn) read0() error {
+	return sc.checkConn()
+}
+
+func (sc *socketConn) checkConn() error {
+	syscallConn, ok := sc.Conn.(syscall.Conn)
+	if !ok {
+		// No way to check, return nil
+		return nil
+	}
+
+	// The reading about to be done here is non-blocking so we don't really
+	// need a read deadline. We just need to clear the previously set read
+	// deadline, if any.
+	sc.Conn.SetReadDeadline(zeroTime)
+
+	rc, err := syscallConn.SyscallConn()
+	if err != nil {
+		return err
+	}
+
+	var n int
+
+	if readErr := rc.Read(func(fd uintptr) bool {
+		n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT)
+		return true
+	}); readErr != nil {
+		return readErr
+	}
+
+	if n > 0 {
+		// We got something, which means we are good
+		return nil
+	}
+
+	if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) {
+		// This means the connection is still open but we don't have
+		// anything to read right now.
+		return nil
+	}
+
+	if err != nil {
+		return err
+	}
+
+	// At this point, it means the other side already closed the connection.
+	return io.EOF
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
new file mode 100644
index 000000000..679838c3b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
@@ -0,0 +1,34 @@
+// +build windows
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+func (sc *socketConn) read0() error {
+	// On Windows, we fall back to the default behavior of reading 0 bytes.
+	var p []byte
+	_, err := sc.Conn.Read(p)
+	return err
+}
+
+func (sc *socketConn) checkConn() error {
+	// On Windows, we always return nil for this check.
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
new file mode 100644
index 000000000..907afca32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+type TSSLServerSocket struct {
+	listener      net.Listener
+	addr          net.Addr
+	clientTimeout time.Duration
+	interrupted   bool
+	cfg           *tls.Config
+}
+
+func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
+	return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
+}
+
+func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
+	if cfg.MinVersion == 0 {
+		cfg.MinVersion = tls.VersionTLS10
+	}
+	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
+	if err != nil {
+		return nil, err
+	}
+	return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
+}
+
+func (p *TSSLServerSocket) Listen() error {
+	if p.IsListening() {
+		return nil
+	}
+	l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
+	if err != nil {
+		return err
+	}
+	p.listener = l
+	return nil
+}
+
+func (p *TSSLServerSocket) Accept() (TTransport, error) {
+	if p.interrupted {
+		return nil, errTransportInterrupted
+	}
+	if p.listener == nil {
+		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
+	}
+	conn, err := p.listener.Accept()
+	if err != nil {
+		return nil, NewTTransportExceptionFromError(err)
+	}
+	return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
+}
+
+// Checks whether the socket is listening.
+func (p *TSSLServerSocket) IsListening() bool {
+	return p.listener != nil
+}
+
+// Opens the server socket, creating a new listener if necessary.
+func (p *TSSLServerSocket) Open() error {
+	if p.IsListening() {
+		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
+	}
+	if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
+		return err
+	} else {
+		p.listener = l
+	}
+	return nil
+}
+
+func (p *TSSLServerSocket) Addr() net.Addr {
+	return p.addr
+}
+
+func (p *TSSLServerSocket) Close() error {
+	defer func() {
+		p.listener = nil
+	}()
+	if p.IsListening() {
+		return p.listener.Close()
+	}
+	return nil
+}
+
+func (p *TSSLServerSocket) Interrupt() error {
+	p.interrupted = true
+	return nil
+}
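+
+// Construction sketch (certificate file paths are illustrative):
+//
+//     cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
+//     if err != nil {
+//         // handle error
+//     }
+//     sock, err := thrift.NewTSSLServerSocket("0.0.0.0:9090", &tls.Config{
+//         Certificates: []tls.Certificate{cert},
+//     })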
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
new file mode 100644
index 000000000..6359a74ce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+type TSSLSocket struct {
+	conn *socketConn
+	// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
+	// only valid if addr is nil.
+	hostPort string
+	// addr is nil when hostPort is not "", and is only used when the
+	// TSSLSocket is constructed from a net.Addr.
+	addr net.Addr
+
+	cfg *TConfiguration
+}
+
+// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port.
+//
+// Example:
+//
+//     trans, err := thrift.NewTSSLSocketConf("localhost:9090", &TConfiguration{
+//         ConnectTimeout: time.Second, // Use 0 for no timeout
+//         SocketTimeout:  time.Second, // Use 0 for no timeout
+//     })
+func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) {
+	if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 {
+		cfg.MinVersion = tls.VersionTLS10
+	}
+	return &TSSLSocket{
+		hostPort: hostPort,
+		cfg:      conf,
+	}, nil
+}
+
+// Deprecated: Use NewTSSLSocketConf instead.
+func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
+	return NewTSSLSocketConf(hostPort, &TConfiguration{
+		TLSConfig: cfg,
+
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTSSLSocketConf instead.
+func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) {
+	return NewTSSLSocketConf(hostPort, &TConfiguration{
+		ConnectTimeout: connectTimeout,
+		SocketTimeout:  socketTimeout,
+		TLSConfig:      cfg,
+
+		noPropagation: true,
+	})
+}
+
+// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr.
+func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket {
+	return &TSSLSocket{
+		addr: addr,
+		cfg:  conf,
+	}
+}
+
+// Deprecated: Use NewTSSLSocketFromAddrConf instead.
+func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket {
+	return NewTSSLSocketFromAddrConf(addr, &TConfiguration{
+		ConnectTimeout: connectTimeout,
+		SocketTimeout:  socketTimeout,
+		TLSConfig:      cfg,
+
+		noPropagation: true,
+	})
+}
+
+// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn.
+func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket {
+	return &TSSLSocket{
+		conn: wrapSocketConn(conn),
+		addr: conn.RemoteAddr(),
+		cfg:  conf,
+	}
+}
+
+// Deprecated: Use NewTSSLSocketFromConnConf instead.
+func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket {
+	return NewTSSLSocketFromConnConf(conn, &TConfiguration{
+		SocketTimeout: socketTimeout,
+		TLSConfig:     cfg,
+
+		noPropagation: true,
+	})
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+//
+// It can be used to change connect and socket timeouts.
+func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
+}
+
+// Sets the connect timeout
+func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error {
+	if p.cfg == nil {
+		p.cfg = &TConfiguration{}
+	}
+	p.cfg.ConnectTimeout = timeout
+	return nil
+}
+
+// Sets the socket timeout
+func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error {
+	if p.cfg == nil {
+		p.cfg = &TConfiguration{}
+	}
+	p.cfg.SocketTimeout = timeout
+	return nil
+}
+
+func (p *TSSLSocket) pushDeadline(read, write bool) {
+	var t time.Time
+	if timeout := p.cfg.GetSocketTimeout(); timeout > 0 {
+		t = time.Now().Add(time.Duration(timeout))
+	}
+	if read && write {
+		p.conn.SetDeadline(t)
+	} else if read {
+		p.conn.SetReadDeadline(t)
+	} else if write {
+		p.conn.SetWriteDeadline(t)
+	}
+}
+
+// Connects the socket, creating a new socket object if necessary.
+func (p *TSSLSocket) Open() error {
+	var err error
+	// If we have a hostname, we need to pass the hostname to tls.Dial for
+	// certificate hostname checks.
+	if p.hostPort != "" {
+		if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer(
+			&net.Dialer{
+				Timeout: p.cfg.GetConnectTimeout(),
+			},
+			"tcp",
+			p.hostPort,
+			p.cfg.GetTLSConfig(),
+		)); err != nil {
+			return NewTTransportException(NOT_OPEN, err.Error())
+		}
+	} else {
+		if p.conn.isValid() {
+			return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
+		}
+		if p.addr == nil {
+			return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
+		}
+		if len(p.addr.Network()) == 0 {
+			return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
+		}
+		if len(p.addr.String()) == 0 {
+			return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
+		}
+		if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer(
+			&net.Dialer{
+				Timeout: p.cfg.GetConnectTimeout(),
+			},
+			p.addr.Network(),
+			p.addr.String(),
+			p.cfg.GetTLSConfig(),
+		)); err != nil {
+			return NewTTransportException(NOT_OPEN, err.Error())
+		}
+	}
+	return nil
+}
+
+// Retrieve the underlying net.Conn
+func (p *TSSLSocket) Conn() net.Conn {
+	return p.conn
+}
+
+// Returns true if the connection is open
+func (p *TSSLSocket) IsOpen() bool {
+	return p.conn.IsOpen()
+}
+
+// Closes the socket.
+func (p *TSSLSocket) Close() error {
+	// Close the socket
+	if p.conn != nil {
+		err := p.conn.Close()
+		if err != nil {
+			return err
+		}
+		p.conn = nil
+	}
+	return nil
+}
+
+func (p *TSSLSocket) Read(buf []byte) (int, error) {
+	if !p.conn.isValid() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(true, false)
+	// NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
+	// p.pushDeadline and p.conn.Read could cause the deadline set inside
+	// p.pushDeadline to be reset, and thus must be avoided.
+	n, err := p.conn.Read(buf)
+	return n, NewTTransportExceptionFromError(err)
+}
+
+func (p *TSSLSocket) Write(buf []byte) (int, error) {
+	if !p.conn.isValid() {
+		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
+	}
+	p.pushDeadline(false, true)
+	return p.conn.Write(buf)
+}
+
+func (p *TSSLSocket) Flush(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSSLSocket) Interrupt() error {
+	if !p.conn.isValid() {
+		return nil
+	}
+	return p.conn.Close()
+}
+
+func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
+	const maxSize = ^uint64(0)
+	return maxSize // the truth is, we just don't know unless framed is used
+}
+
+var _ TConfigurationSetter = (*TSSLSocket)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
new file mode 100644
index 000000000..ba2738a8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+	"io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+	Flush() (err error)
+}
+
+type ContextFlusher interface {
+	Flush(ctx context.Context) (err error)
+}
+
+type ReadSizeProvider interface {
+	RemainingBytes() (num_bytes uint64)
+}
+
+// Encapsulates the I/O layer
+type TTransport interface {
+	io.ReadWriteCloser
+	ContextFlusher
+	ReadSizeProvider
+
+	// Opens the transport for communication
+	Open() error
+
+	// Returns true if the transport is open
+	IsOpen() bool
+}
+
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// TRichTransport is an "enhanced" transport with extra capabilities. You need
+// to use one of these to construct a protocol.
+// Notably, TSocket does not implement this interface, and it is always a
+// mistake to use TSocket directly with a protocol.
+type TRichTransport interface {
+	io.ReadWriter
+	io.ByteReader
+	io.ByteWriter
+	stringWriter
+	ContextFlusher
+	ReadSizeProvider
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
new file mode 100644
index 000000000..0a3f07646
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+type timeoutable interface {
+	Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+	TException
+	TypeId() int
+	Err() error
+}
+
+const (
+	UNKNOWN_TRANSPORT_EXCEPTION = 0
+	NOT_OPEN                    = 1
+	ALREADY_OPEN                = 2
+	TIMED_OUT                   = 3
+	END_OF_FILE                 = 4
+)
+
+type tTransportException struct {
+	typeId int
+	err    error
+	msg    string
+}
+
+var _ TTransportException = (*tTransportException)(nil)
+
+func (tTransportException) TExceptionType() TExceptionType {
+	return TExceptionTypeTransport
+}
+
+func (p *tTransportException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+	return p.msg
+}
+
+func (p *tTransportException) Err() error {
+	return p.err
+}
+
+func (p *tTransportException) Unwrap() error {
+	return p.err
+}
+
+func (p *tTransportException) Timeout() bool {
+	return p.typeId == TIMED_OUT
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+	return &tTransportException{
+		typeId: t,
+		err:    errors.New(e),
+		msg:    e,
+	}
+}
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+	if e == nil {
+		return nil
+	}
+
+	if t, ok := e.(TTransportException); ok {
+		return t
+	}
+
+	te := &tTransportException{
+		typeId: UNKNOWN_TRANSPORT_EXCEPTION,
+		err:    e,
+		msg:    e.Error(),
+	}
+
+	if isTimeoutError(e) {
+		te.typeId = TIMED_OUT
+		return te
+	}
+
+	if errors.Is(e, io.EOF) {
+		te.typeId = END_OF_FILE
+		return te
+	}
+
+	return te
+}
+
+func prependTTransportException(prepend string, e TTransportException) TTransportException {
+	return &tTransportException{
+		typeId: e.TypeId(),
+		err:    e,
+		msg:    prepend + e.Error(),
+	}
+}
+
+// isTimeoutError returns true when err is an error caused by timeout.
+//
+// Note that this also includes TTransportException wrapped timeout errors.
+func isTimeoutError(err error) bool {
+	var t timeoutable
+	if errors.As(err, &t) {
+		return t.Timeout()
+	}
+	return false
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
new file mode 100644
index 000000000..c80580794
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TTransportFactory is a factory used to create wrapped instances of Transports.
+// This is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to mutate them (e.g. create
+// a BufferedTransport from the underlying base transport).
+type TTransportFactory interface {
+	GetTransport(trans TTransport) (TTransport, error)
+}
+
+type tTransportFactory struct{}
+
+// Return a wrapped instance of the base Transport.
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	return trans, nil
+}
+
+func NewTTransportFactory() TTransportFactory {
+	return &tTransportFactory{}
+}
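+
+// Wiring sketch: factories are typically handed to a server constructor,
+// which applies them to every accepted connection (names illustrative):
+//
+//     server := thrift.NewTSimpleServer4(processor, serverTransport,
+//         thrift.NewTTransportFactory(), protocolFactory)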
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
new file mode 100644
index 000000000..4292ffcad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+	STOP   = 0
+	VOID   = 1
+	BOOL   = 2
+	BYTE   = 3
+	I08    = 3
+	DOUBLE = 4
+	I16    = 6
+	I32    = 8
+	I64    = 10
+	STRING = 11
+	UTF7   = 11
+	STRUCT = 12
+	MAP    = 13
+	SET    = 14
+	LIST   = 15
+	UTF8   = 16
+	UTF16  = 17
+	// BINARY = 18 // wrong and unused
+)
+
+var typeNames = map[int]string{
+	STOP:   "STOP",
+	VOID:   "VOID",
+	BOOL:   "BOOL",
+	BYTE:   "BYTE",
+	DOUBLE: "DOUBLE",
+	I16:    "I16",
+	I32:    "I32",
+	I64:    "I64",
+	STRING: "STRING",
+	STRUCT: "STRUCT",
+	MAP:    "MAP",
+	SET:    "SET",
+	LIST:   "LIST",
+	UTF8:   "UTF8",
+	UTF16:  "UTF16",
+}
+
+func (p TType) String() string {
+	if s, ok := typeNames[int(p)]; ok {
+		return s
+	}
+	return "Unknown"
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
new file mode 100644
index 000000000..259943a62
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
@@ -0,0 +1,137 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied. See the License for the
+* specific language governing permissions and limitations
+* under the License.
+ */
+
+package thrift
+
+import (
+	"compress/zlib"
+	"context"
+	"io"
+)
+
+// TZlibTransportFactory is a factory for TZlibTransport instances
+type TZlibTransportFactory struct {
+	level   int
+	factory TTransportFactory
+}
+
+// TZlibTransport is a TTransport implementation that makes use of zlib compression.
+type TZlibTransport struct {
+	reader    io.ReadCloser
+	transport TTransport
+	writer    *zlib.Writer
+}
+
+// GetTransport constructs a new instance of TZlibTransport, wrapping the given transport.
+func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if p.factory != nil {
+		// wrap other factory
+		var err error
+		trans, err = p.factory.GetTransport(trans)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return NewTZlibTransport(trans, p.level)
+}
+
+// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory.
+func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
+	return &TZlibTransportFactory{level: level, factory: nil}
+}
+
+// NewTZlibTransportFactoryWithFactory constructs a new instance of
+// TZlibTransportFactory as a wrapper over an existing transport factory.
+func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory {
+	return &TZlibTransportFactory{level: level, factory: factory}
+}
+
+// NewTZlibTransport constructs a new instance of TZlibTransport
+func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
+	w, err := zlib.NewWriterLevel(trans, level)
+	if err != nil {
+		return nil, err
+	}
+
+	return &TZlibTransport{
+		writer:    w,
+		transport: trans,
+	}, nil
+}
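+
+// Wrapping sketch (the underlying transport is illustrative):
+//
+//     base, _ := thrift.NewTSocketConf("localhost:9090", nil)
+//     ztrans, err := thrift.NewTZlibTransport(base, zlib.BestCompression)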
+
+// Close closes the reader and writer (flushing any unwritten data) and closes
+// the underlying transport.
+func (z *TZlibTransport) Close() error {
+	if z.reader != nil {
+		if err := z.reader.Close(); err != nil {
+			return err
+		}
+	}
+	if err := z.writer.Close(); err != nil {
+		return err
+	}
+	return z.transport.Close()
+}
+
+// Flush flushes the writer and its underlying transport.
+func (z *TZlibTransport) Flush(ctx context.Context) error {
+	if err := z.writer.Flush(); err != nil {
+		return err
+	}
+	return z.transport.Flush(ctx)
+}
+
+// IsOpen returns true if the transport is open
+func (z *TZlibTransport) IsOpen() bool {
+	return z.transport.IsOpen()
+}
+
+// Open opens the transport for communication
+func (z *TZlibTransport) Open() error {
+	return z.transport.Open()
+}
+
+func (z *TZlibTransport) Read(p []byte) (int, error) {
+	if z.reader == nil {
+		r, err := zlib.NewReader(z.transport)
+		if err != nil {
+			return 0, NewTTransportExceptionFromError(err)
+		}
+		z.reader = r
+	}
+
+	return z.reader.Read(p)
+}
+
+// RemainingBytes returns the size in bytes of the data that is still to be
+// read.
+func (z *TZlibTransport) RemainingBytes() uint64 {
+	return z.transport.RemainingBytes()
+}
+
+func (z *TZlibTransport) Write(p []byte) (int, error) {
+	return z.writer.Write(p)
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(z.transport, conf)
+}
+
+var _ TConfigurationSetter = (*TZlibTransport)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go
new file mode 100644
index 000000000..ddbd681d0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go
@@ -0,0 +1,360 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
+
+import (
+	"context"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	keyInstrumentationLibraryName    = "otel.library.name"
+	keyInstrumentationLibraryVersion = "otel.library.version"
+	keyError                         = "error"
+	keySpanKind                      = "span.kind"
+	keyStatusCode                    = "otel.status_code"
+	keyStatusMessage                 = "otel.status_description"
+	keyDroppedAttributeCount         = "otel.event.dropped_attributes_count"
+	keyEventName                     = "event"
+)
+
+// New returns an OTel Exporter implementation that exports the collected
+// spans to Jaeger.
+func New(endpointOption EndpointOption) (*Exporter, error) {
+	uploader, err := endpointOption.newBatchUploader()
+	if err != nil {
+		return nil, err
+	}
+
+	// Fetch the default service.name from the default resource as a fallback.
+	var defaultServiceName string
+	defaultResource := resource.Default()
+	if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists {
+		defaultServiceName = value.AsString()
+	}
+	if defaultServiceName == "" {
+		return nil, fmt.Errorf("failed to get service name from default resource")
+	}
+
+	stopCh := make(chan struct{})
+	e := &Exporter{
+		uploader:           uploader,
+		stopCh:             stopCh,
+		defaultServiceName: defaultServiceName,
+	}
+	return e, nil
+}
+
+// Exporter exports OpenTelemetry spans to a Jaeger agent or collector.
+type Exporter struct {
+	uploader           batchUploader
+	stopOnce           sync.Once
+	stopCh             chan struct{}
+	defaultServiceName string
+}
+
+var _ sdktrace.SpanExporter = (*Exporter)(nil)
+
+// ExportSpans transforms and exports OpenTelemetry spans to Jaeger.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
+	// Return fast if the context is already canceled or the Exporter is shut down.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-e.stopCh:
+		return nil
+	default:
+	}
+
+	// Cancel the export if the Exporter is shut down.
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	defer cancel()
+	go func(ctx context.Context, cancel context.CancelFunc) {
+		select {
+		case <-ctx.Done():
+		case <-e.stopCh:
+			cancel()
+		}
+	}(ctx, cancel)
+
+	for _, batch := range jaegerBatchList(spans, e.defaultServiceName) {
+		if err := e.uploader.upload(ctx, batch); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Shutdown stops the Exporter. This will close all connections and release
+// all resources held by the Exporter.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	// Stop any active and subsequent exports.
+	e.stopOnce.Do(func() { close(e.stopCh) })
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return e.uploader.shutdown(ctx)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+func (e *Exporter) MarshalLog() interface{} {
+	return struct {
+		Type string
+	}{
+		Type: "jaeger",
+	}
+}
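+
+// exampleTracerProvider is an editor-added sketch, not upstream code: it
+// shows the usual way an Exporter is wired into the trace SDK so that spans
+// are batched and exported (the function name is hypothetical).
+func exampleTracerProvider(exp *Exporter) *sdktrace.TracerProvider {
+	return sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+}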
+
+func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span {
+	attr := ss.Attributes()
+	tags := make([]*gen.Tag, 0, len(attr))
+	for _, kv := range attr {
+		tag := keyValueToTag(kv)
+		if tag != nil {
+			tags = append(tags, tag)
+		}
+	}
+
+	if is := ss.InstrumentationScope(); is.Name != "" {
+		tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name))
+		if is.Version != "" {
+			tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version))
+		}
+	}
+
+	if ss.SpanKind() != trace.SpanKindInternal {
+		tags = append(tags,
+			getStringTag(keySpanKind, ss.SpanKind().String()),
+		)
+	}
+
+	if ss.Status().Code != codes.Unset {
+		switch ss.Status().Code {
+		case codes.Ok:
+			tags = append(tags, getStringTag(keyStatusCode, "OK"))
+		case codes.Error:
+			tags = append(tags, getBoolTag(keyError, true))
+			tags = append(tags, getStringTag(keyStatusCode, "ERROR"))
+		}
+		if ss.Status().Description != "" {
+			tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description))
+		}
+	}
+
+	var logs []*gen.Log
+	for _, a := range ss.Events() {
+		nTags := len(a.Attributes)
+		if a.Name != "" {
+			nTags++
+		}
+		if a.DroppedAttributeCount != 0 {
+			nTags++
+		}
+		fields := make([]*gen.Tag, 0, nTags)
+		if a.Name != "" {
+			// If an event contains an attribute with the same key, it needs
+			// to be given precedence and overwrite this.
+			fields = append(fields, getStringTag(keyEventName, a.Name))
+		}
+		for _, kv := range a.Attributes {
+			tag := keyValueToTag(kv)
+			if tag != nil {
+				fields = append(fields, tag)
+			}
+		}
+		if a.DroppedAttributeCount != 0 {
+			fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount)))
+		}
+		logs = append(logs, &gen.Log{
+			Timestamp: a.Time.UnixNano() / 1000,
+			Fields:    fields,
+		})
+	}
+
+	var refs []*gen.SpanRef
+	for _, link := range ss.Links() {
+		tid := link.SpanContext.TraceID()
+		sid := link.SpanContext.SpanID()
+		refs = append(refs, &gen.SpanRef{
+			TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])),
+			TraceIdLow:  int64(binary.BigEndian.Uint64(tid[8:16])),
+			SpanId:      int64(binary.BigEndian.Uint64(sid[:])),
+			RefType:     gen.SpanRefType_FOLLOWS_FROM,
+		})
+	}
+
+	tid := ss.SpanContext().TraceID()
+	sid := ss.SpanContext().SpanID()
+	psid := ss.Parent().SpanID()
+	return &gen.Span{
+		TraceIdHigh:   int64(binary.BigEndian.Uint64(tid[0:8])),
+		TraceIdLow:    int64(binary.BigEndian.Uint64(tid[8:16])),
+		SpanId:        int64(binary.BigEndian.Uint64(sid[:])),
+		ParentSpanId:  int64(binary.BigEndian.Uint64(psid[:])),
+		OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv"
+		Flags:         int32(ss.SpanContext().TraceFlags()),
+		StartTime:     ss.StartTime().UnixNano() / 1000,
+		Duration:      ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000,
+		Tags:          tags,
+		Logs:          logs,
+		References:    refs,
+	}
+}
+
+func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag {
+	var tag *gen.Tag
+	switch keyValue.Value.Type() {
+	case attribute.STRING:
+		s := keyValue.Value.AsString()
+		tag = &gen.Tag{
+			Key:   string(keyValue.Key),
+			VStr:  &s,
+			VType: gen.TagType_STRING,
+		}
+	case attribute.BOOL:
+		b := keyValue.Value.AsBool()
+		tag = &gen.Tag{
+			Key:   string(keyValue.Key),
+			VBool: &b,
+			VType: gen.TagType_BOOL,
+		}
+	case attribute.INT64:
+		i := keyValue.Value.AsInt64()
+		tag = &gen.Tag{
+			Key:   string(keyValue.Key),
+			VLong: &i,
+			VType: gen.TagType_LONG,
+		}
+	case attribute.FLOAT64:
+		f := keyValue.Value.AsFloat64()
+		tag = &gen.Tag{
+			Key:     string(keyValue.Key),
+			VDouble: &f,
+			VType:   gen.TagType_DOUBLE,
+		}
+	case attribute.BOOLSLICE,
+		attribute.INT64SLICE,
+		attribute.FLOAT64SLICE,
+		attribute.STRINGSLICE:
+		data, _ := json.Marshal(keyValue.Value.AsInterface())
+		a := (string)(data)
+		tag = &gen.Tag{
+			Key:   string(keyValue.Key),
+			VStr:  &a,
+			VType: gen.TagType_STRING,
+		}
+	}
+	return tag
+}
+
+func getInt64Tag(k string, i int64) *gen.Tag {
+	return &gen.Tag{
+		Key:   k,
+		VLong: &i,
+		VType: gen.TagType_LONG,
+	}
+}
+
+func getStringTag(k, s string) *gen.Tag {
+	return &gen.Tag{
+		Key:   k,
+		VStr:  &s,
+		VType: gen.TagType_STRING,
+	}
+}
+
+func getBoolTag(k string, b bool) *gen.Tag {
+	return &gen.Tag{
+		Key:   k,
+		VBool: &b,
+		VType: gen.TagType_BOOL,
+	}
+}
+
+// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch.
+func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch {
+	if len(ssl) == 0 {
+		return nil
+	}
+
+	batchDict := make(map[attribute.Distinct]*gen.Batch)
+
+	for _, ss := range ssl {
+		if ss == nil {
+			continue
+		}
+
+		resourceKey := ss.Resource().Equivalent()
+		batch, bOK := batchDict[resourceKey]
+		if !bOK {
+			batch = &gen.Batch{
+				Process: process(ss.Resource(), defaultServiceName),
+				Spans:   []*gen.Span{},
+			}
+		}
+		batch.Spans = append(batch.Spans, spanToThrift(ss))
+		batchDict[resourceKey] = batch
+	}
+
+	// Transform the categorized map into a slice
+	batchList := make([]*gen.Batch, 0, len(batchDict))
+	for _, batch := range batchDict {
+		batchList = append(batchList, batch)
+	}
+	return batchList
+}
+
+// process transforms an OTel Resource into a jaeger Process.
+func process(res *resource.Resource, defaultServiceName string) *gen.Process {
+	var process gen.Process
+
+	var serviceName attribute.KeyValue
+	if res != nil {
+		for iter := res.Iter(); iter.Next(); {
+			if iter.Attribute().Key == semconv.ServiceNameKey {
+				serviceName = iter.Attribute()
+				// Don't convert service.name into tag.
+				continue
+			}
+			if tag := keyValueToTag(iter.Attribute()); tag != nil {
+				process.Tags = append(process.Tags, tag)
+			}
+		}
+	}
+
+	// If no service.name is contained in a Span's Resource,
+	// that field MUST be populated from the default Resource.
+	if serviceName.Value.AsString() == "" {
+		serviceName = semconv.ServiceName(defaultServiceName)
+	}
+	process.ServiceName = serviceName.Value.AsString()
+
+	return &process
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go
new file mode 100644
index 000000000..88055c8a3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go
@@ -0,0 +1,204 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-logr/logr"
+)
+
+// reconnectingUDPConn is an implementation of udpConn that re-resolves
+// hostPort every resolveTimeout; if the resolved address differs from the
+// current conn's, the new address is dialed and the conn is swapped.
+type reconnectingUDPConn struct {
+	// `sync/atomic` expects the first word in an allocated struct to be 64-bit
+	// aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
+	bufferBytes int64
+	hostPort    string
+	resolveFunc resolveFunc
+	dialFunc    dialFunc
+	logger      logr.Logger
+
+	connMtx   sync.RWMutex
+	conn      *net.UDPConn
+	destAddr  *net.UDPAddr
+	closeChan chan struct{}
+}
+
+type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
+type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
+
+// newReconnectingUDPConn returns a new udpConn that re-resolves hostPort
+// every resolveTimeout; if the resolved address differs from the current
+// conn's, the new address is dialed and the conn is swapped.
+func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger logr.Logger) (*reconnectingUDPConn, error) {
+	conn := &reconnectingUDPConn{
+		hostPort:    hostPort,
+		resolveFunc: resolveFunc,
+		dialFunc:    dialFunc,
+		logger:      logger,
+		closeChan:   make(chan struct{}),
+		bufferBytes: int64(bufferBytes),
+	}
+
+	if err := conn.attemptResolveAndDial(); err != nil {
+		conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout)
+	}
+
+	go conn.reconnectLoop(resolveTimeout)
+
+	return conn, nil
+}
+
+func (c *reconnectingUDPConn) logf(format string, args ...interface{}) {
+	if c.logger != emptyLogger {
+		c.logger.Info(format, args...)
+	}
+}
+
+func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
+	ticker := time.NewTicker(resolveTimeout)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-c.closeChan:
+			return
+		case <-ticker.C:
+			if err := c.attemptResolveAndDial(); err != nil {
+				c.logf("%s", err.Error())
+			}
+		}
+	}
+}
+
+func (c *reconnectingUDPConn) attemptResolveAndDial() error {
+	newAddr, err := c.resolveFunc("udp", c.hostPort)
+	if err != nil {
+		return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
+	}
+
+	c.connMtx.RLock()
+	curAddr := c.destAddr
+	c.connMtx.RUnlock()
+
+	// don't attempt to dial if an addr was successfully dialed previously and the resolved addr is the same as the current conn
+	if curAddr != nil && newAddr.String() == curAddr.String() {
+		return nil
+	}
+
+	if err := c.attemptDialNewAddr(newAddr); err != nil {
+		return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
+	}
+
+	return nil
+}
+
+func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
+	connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
+	if err != nil {
+		return err
+	}
+
+	if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
+		if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
+			return err
+		}
+	}
+
+	c.connMtx.Lock()
+	c.destAddr = newAddr
+	// store prev to close later
+	prevConn := c.conn
+	c.conn = connUDP
+	c.connMtx.Unlock()
+
+	if prevConn != nil {
+		return prevConn.Close()
+	}
+
+	return nil
+}
+
+// Write calls net.udpConn.Write. If it fails, an attempt is made to connect to a new addr; if that succeeds, the write is retried before returning.
+func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
+	var bytesWritten int
+	var err error
+
+	c.connMtx.RLock()
+	conn := c.conn
+	c.connMtx.RUnlock()
+
+	if conn == nil {
+		// if the connection is not initialized, indicate this with an err in order to hook into the retry logic
+		err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
+	} else {
+		bytesWritten, err = conn.Write(b)
+	}
+
+	if err == nil {
+		return bytesWritten, nil
+	}
+
+	// attempt to resolve and dial a new address in case that's the problem; if that succeeds, try the write again
+	if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
+		c.connMtx.RLock()
+		conn := c.conn
+		c.connMtx.RUnlock()
+
+		return conn.Write(b)
+	}
+
+	// return original error if reconn fails
+	return bytesWritten, err
+}
+
+// Close stops the reconnectLoop, then closes the connection via net.udpConn's implementation.
+func (c *reconnectingUDPConn) Close() error {
+	close(c.closeChan)
+
+	// acquire rw lock before closing conn to ensure calls to Write drain
+	c.connMtx.Lock()
+	defer c.connMtx.Unlock()
+
+	if c.conn != nil {
+		return c.conn.Close()
+	}
+
+	return nil
+}
+
+// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation, wrapped with an RLock. If no conn is currently
+// held when SetWriteBuffer is called, bufferBytes is stored to be set on new conns.
+func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
+	var err error
+
+	c.connMtx.RLock()
+	conn := c.conn
+	c.connMtx.RUnlock()
+
+	if conn != nil {
+		err = c.conn.SetWriteBuffer(bytes)
+	}
+
+	if err == nil {
+		atomic.StoreInt64(&c.bufferBytes, int64(bytes))
+	}
+
+	return err
+}
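+
+// exampleReconnectingConn is an editor-added sketch, not upstream code:
+// net.ResolveUDPAddr and net.DialUDP satisfy resolveFunc and dialFunc
+// directly, so a conn that re-resolves its target every 30 seconds can be
+// built as below (host, port, buffer size, and interval are illustrative).
+func exampleReconnectingConn(logger logr.Logger) (*reconnectingUDPConn, error) {
+	return newReconnectingUDPConn("localhost:6831", 65000, 30*time.Second, net.ResolveUDPAddr, net.DialUDP, logger)
+}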
diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go
new file mode 100644
index 000000000..f65e3a678
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go
@@ -0,0 +1,339 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+
+	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
+	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
+)
+
+// batchUploader sends batches of spans to Jaeger.
+type batchUploader interface {
+	upload(context.Context, *gen.Batch) error
+	shutdown(context.Context) error
+}
+
+// EndpointOption configures a Jaeger endpoint.
+type EndpointOption interface {
+	newBatchUploader() (batchUploader, error)
+}
+
+type endpointOptionFunc func() (batchUploader, error)
+
+func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) {
+	return fn()
+}
+
+// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent
+// over the compact thrift protocol. This will use the following environment variables for
+// configuration if no explicit option is provided:
+//
+// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host
+// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port
+//
+// The passed options take precedence over any environment variables; default
+// values are used if neither is provided.
+func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption {
+	return endpointOptionFunc(func() (batchUploader, error) {
+		cfg := agentEndpointConfig{
+			agentClientUDPParams{
+				AttemptReconnecting: true,
+				Host:                envOr(envAgentHost, "localhost"),
+				Port:                envOr(envAgentPort, "6831"),
+			},
+		}
+		for _, opt := range options {
+			cfg = opt.apply(cfg)
+		}
+
+		client, err := newAgentClientUDP(cfg.agentClientUDPParams)
+		if err != nil {
+			return nil, err
+		}
+
+		return &agentUploader{client: client}, nil
+	})
+}
+
+// AgentEndpointOption configures a Jaeger agent endpoint.
+type AgentEndpointOption interface {
+	apply(agentEndpointConfig) agentEndpointConfig
+}
+
+type agentEndpointConfig struct {
+	agentClientUDPParams
+}
+
+type agentEndpointOptionFunc func(agentEndpointConfig) agentEndpointConfig
+
+func (fn agentEndpointOptionFunc) apply(cfg agentEndpointConfig) agentEndpointConfig {
+	return fn(cfg)
+}
+
+// WithAgentHost sets a host to be used in the agent client endpoint.
+// This option overrides any value set for the
+// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable.
+// If this option is not passed and the env var is not set, "localhost" will be used by default.
+func WithAgentHost(host string) AgentEndpointOption {
+	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
+		o.Host = host
+		return o
+	})
+}
+
+// WithAgentPort sets a port to be used in the agent client endpoint.
+// This option overrides any value set for the
+// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable.
+// If this option is not passed and the env var is not set, "6831" will be used by default.
+func WithAgentPort(port string) AgentEndpointOption {
+	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
+		o.Port = port
+		return o
+	})
+}
+
+var emptyLogger = logr.Logger{}
+
+// WithLogger sets a logger to be used by the agent client.
+// WithLogger and WithLogr will overwrite each other.
+func WithLogger(logger *log.Logger) AgentEndpointOption {
+	return WithLogr(stdr.New(logger))
+}
+
+// WithLogr sets a logr.Logger to be used by the agent client.
+// WithLogr and WithLogger will overwrite each other.
+func WithLogr(logger logr.Logger) AgentEndpointOption {
+	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
+		o.Logger = logger
+		return o
+	})
+}
+
+// WithDisableAttemptReconnecting sets the option to disable the reconnecting UDP client.
+func WithDisableAttemptReconnecting() AgentEndpointOption {
+	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
+		o.AttemptReconnecting = false
+		return o
+	})
+}
+
+// WithAttemptReconnectingInterval sets the interval between attempts to re-resolve the agent endpoint.
+func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption {
+	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
+		o.AttemptReconnectInterval = interval
+		return o
+	})
+}
+
+// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent.
+func WithMaxPacketSize(size int) AgentEndpointOption {
+	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
+		o.MaxPacketSize = size
+		return o
+	})
+}
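+
+// exampleAgentExporter is an editor-added sketch, not upstream code: it shows
+// how the agent options above combine into an Exporter (host, port, and
+// packet size values are illustrative).
+func exampleAgentExporter() (*Exporter, error) {
+	return New(WithAgentEndpoint(
+		WithAgentHost("jaeger-agent"),
+		WithAgentPort("6831"),
+		WithMaxPacketSize(65000),
+	))
+}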
+
+// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will
+// use the following environment variables for configuration if no explicit option is provided:
+//
+// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector.
+// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint.
+// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint.
+//
+// The passed options will take precedence over any environment variables.
+// If no value is provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used.
+// If no value is provided for the username or the password, they will not be set, since there is no default.
+func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption {
+	return endpointOptionFunc(func() (batchUploader, error) {
+		cfg := collectorEndpointConfig{
+			endpoint:   envOr(envEndpoint, "http://localhost:14268/api/traces"),
+			username:   envOr(envUser, ""),
+			password:   envOr(envPassword, ""),
+			httpClient: http.DefaultClient,
+		}
+
+		for _, opt := range options {
+			cfg = opt.apply(cfg)
+		}
+
+		return &collectorUploader{
+			endpoint:   cfg.endpoint,
+			username:   cfg.username,
+			password:   cfg.password,
+			httpClient: cfg.httpClient,
+		}, nil
+	})
+}
+
+// CollectorEndpointOption configures a Jaeger collector endpoint.
+type CollectorEndpointOption interface {
+	apply(collectorEndpointConfig) collectorEndpointConfig
+}
+
+type collectorEndpointConfig struct {
+	// endpoint for sending spans directly to a collector.
+	endpoint string
+
+	// username to be used for authentication with the collector endpoint.
+	username string
+
+	// password to be used for authentication with the collector endpoint.
+	password string
+
+	// httpClient to be used to make requests to the collector endpoint.
+	httpClient *http.Client
+}
+
+type collectorEndpointOptionFunc func(collectorEndpointConfig) collectorEndpointConfig
+
+func (fn collectorEndpointOptionFunc) apply(cfg collectorEndpointConfig) collectorEndpointConfig {
+	return fn(cfg)
+}
+
+// WithEndpoint sets the URL for the Jaeger collector that spans are sent to.
+// This option overrides any value set for the
+// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable.
+// If this option is not passed and the environment variable is not set,
+// "http://localhost:14268/api/traces" will be used by default.
+func WithEndpoint(endpoint string) CollectorEndpointOption {
+	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
+		o.endpoint = endpoint
+		return o
+	})
+}
+
+// WithUsername sets the username to be used in the authorization header sent for all requests to the collector.
+// This option overrides any value set for the
+// OTEL_EXPORTER_JAEGER_USER environment variable.
+// If this option is not passed and the environment variable is not set, no username will be set.
+func WithUsername(username string) CollectorEndpointOption {
+	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
+		o.username = username
+		return o
+	})
+}
+
+// WithPassword sets the password to be used in the authorization header sent for all requests to the collector.
+// This option overrides any value set for the
+// OTEL_EXPORTER_JAEGER_PASSWORD environment variable.
+// If this option is not passed and the environment variable is not set, no password will be set.
+func WithPassword(password string) CollectorEndpointOption {
+	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
+		o.password = password
+		return o
+	})
+}
+
+// WithHTTPClient sets the http client to be used to make requests to the collector endpoint.
+func WithHTTPClient(client *http.Client) CollectorEndpointOption {
+	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
+		o.httpClient = client
+		return o
+	})
+}
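+
+// exampleCollectorExporter is an editor-added sketch, not upstream code: it
+// shows how the collector options above combine into an Exporter (the
+// endpoint URL and credentials are illustrative).
+func exampleCollectorExporter() (*Exporter, error) {
+	return New(WithCollectorEndpoint(
+		WithEndpoint("http://jaeger-collector:14268/api/traces"),
+		WithUsername("user"),
+		WithPassword("secret"),
+	))
+}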
+
+// agentUploader implements the batchUploader interface, sending batches to
+// Jaeger through the UDP agent.
+type agentUploader struct {
+	client *agentClientUDP
+}
+
+var _ batchUploader = (*agentUploader)(nil)
+
+func (a *agentUploader) shutdown(ctx context.Context) error {
+	done := make(chan error, 1)
+	go func() {
+		done <- a.client.Close()
+	}()
+
+	select {
+	case <-ctx.Done():
+		// Prioritize not blocking the calling thread and just leak the
+		// spawned goroutine to close the client.
+		return ctx.Err()
+	case err := <-done:
+		return err
+	}
+}
+
+func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error {
+	return a.client.EmitBatch(ctx, batch)
+}
+
+// collectorUploader implements the batchUploader interface, sending batches
+// to Jaeger through the collector HTTP endpoint.
+type collectorUploader struct {
+	endpoint   string
+	username   string
+	password   string
+	httpClient *http.Client
+}
+
+var _ batchUploader = (*collectorUploader)(nil)
+
+func (c *collectorUploader) shutdown(ctx context.Context) error {
+	// The Exporter will cancel any active exports and will prevent all
+	// subsequent exports, so nothing to do here.
+	return nil
+}
+
+func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error {
+	body, err := serialize(batch)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body)
+	if err != nil {
+		return err
+	}
+	if c.username != "" && c.password != "" {
+		req.SetBasicAuth(c.username, c.password)
+	}
+	req.Header.Set("Content-Type", "application/x-thrift")
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return err
+	}
+
+	_, _ = io.Copy(io.Discard, resp.Body)
+	if err = resp.Body.Close(); err != nil {
+		return err
+	}
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func serialize(obj thrift.TStruct) (*bytes.Buffer, error) {
+	buf := thrift.NewTMemoryBuffer()
+	if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil {
+		return nil, err
+	}
+	return buf.Buffer, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go
new file mode 100644
index 000000000..b3fd45d9d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal contains common functionality for all OTLP exporters.
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+
+import (
+	"fmt"
+	"path"
+	"strings"
+)
+
+// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
+func CleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
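+
+// Editor's note, a hedged sketch rather than upstream documentation: a few
+// illustrative results for CleanPath.
+//
+//	CleanPath("  /v1/traces ", "/v1/traces") // "/v1/traces" (spaces trimmed)
+//	CleanPath("v1//traces", "/v1/traces")    // "/v1/traces" (cleaned and made absolute)
+//	CleanPath("", "/v1/traces")              // "/v1/traces" (empty cleans to ".")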
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..53ff3126b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		nameValue := strings.SplitN(header, "=", 2)
+		if len(nameValue) < 2 {
+			global.Error(errors.New("missing '='"), "parse headers", "input", nameValue)
+			continue
+		}
+		name, err := url.QueryUnescape(nameValue[0])
+		if err != nil {
+			global.Error(err, "escape header key", "key", nameValue[0])
+			continue
+		}
+		trimmedName := strings.TrimSpace(name)
+		value, err := url.QueryUnescape(nameValue[1])
+		if err != nil {
+			global.Error(err, "escape header value", "value", nameValue[1])
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
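+
+// exampleApply is an editor-added sketch, not upstream code: it shows how an
+// exporter might read its settings through EnvOptionsReader. The namespace,
+// key names, and the injected getenv/readFile parameters are illustrative
+// assumptions.
+func exampleApply(getenv func(string) string, readFile func(string) ([]byte, error)) (endpoint string, timeout time.Duration) {
+	e := EnvOptionsReader{
+		GetEnv:    getenv,
+		ReadFile:  readFile,
+		Namespace: "OTEL_EXPORTER_OTLP",
+	}
+	e.Apply(
+		// Reads OTEL_EXPORTER_OTLP_ENDPOINT, if set.
+		WithString("ENDPOINT", func(s string) { endpoint = s }),
+		// Reads OTEL_EXPORTER_OTLP_TIMEOUT as milliseconds, if set.
+		WithDuration("TIMEOUT", func(d time.Duration) { timeout = d }),
+	)
+	return endpoint, timeout
+}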
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
new file mode 100644
index 000000000..9aa62ed9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal contains common functionality for all OTLP exporters.
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+
+import "go.opentelemetry.io/otel"
+
+// GetUserAgentHeader returns an OTLP header value of the form "OTel OTLP Exporter Go/{{ .Version }}".
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#user-agent
+func GetUserAgentHeader() string {
+	return "OTel OTLP Exporter Go/" + otel.Version()
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
new file mode 100644
index 000000000..9ab89b375
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
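+
+// Editor's note, a hedged sketch rather than upstream documentation: an OTel
+// error handler can detect this error category with the standard errors
+// package, e.g.
+//
+//	if errors.Is(err, internal.PartialSuccess{}) {
+//		// A partial success: some spans or data points were rejected.
+//	}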
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
new file mode 100644
index 000000000..7e1b0055a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether retrying batches in case of export failure
+	// is enabled.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch.  Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and, if the error
+// includes an explicit throttle duration, the duration that should be
+// honored.
+//
+// The function must return true for the first return parameter if the error
+// argument is retryable, otherwise it must return false.
+//
+// The function must return a non-zero time.Duration if the error contains an
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero-valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried and based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %s", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context and the amount of time to wait. It returns
+// nil if the timer fires before or at the same time as the context's
+// deadline, indicating that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
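+
+// exampleRetryUsage is an editor-added sketch, not upstream code: it wraps a
+// caller-supplied send function with DefaultConfig. The evaluate function
+// here treats every error as retryable with no throttle hint, an assumption
+// made purely for illustration.
+func exampleRetryUsage(ctx context.Context, send func(context.Context) error) error {
+	requestFunc := DefaultConfig.RequestFunc(func(err error) (bool, time.Duration) {
+		return true, 0 // retry everything, honor no throttle (illustrative)
+	})
+	return requestFunc(ctx, send)
+}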
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
new file mode 100644
index 000000000..217751da5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+
+// ErrorKind is used to identify the kind of export error
+// being wrapped.
+type ErrorKind int
+
+const (
+	// TracesExport indicates the error comes from the OTLP trace exporter.
+	TracesExport ErrorKind = iota
+)
+
+// prefix returns a prefix for the Error() string.
+func (k ErrorKind) prefix() string {
+	switch k {
+	case TracesExport:
+		return "traces export: "
+	default:
+		return "unknown: "
+	}
+}
+
+// wrappedExportError wraps an OTLP exporter error with the kind of
+// signal that produced it.
+type wrappedExportError struct {
+	wrap error
+	kind ErrorKind
+}
+
+// WrapTracesError wraps an error from the OTLP exporter for traces.
+func WrapTracesError(err error) error {
+	return wrappedExportError{
+		wrap: err,
+		kind: TracesExport,
+	}
+}
+
+var _ error = wrappedExportError{}
+
+// Error attaches a prefix corresponding to the kind of exporter.
+func (t wrappedExportError) Error() string {
+	return t.kind.prefix() + t.wrap.Error()
+}
+
+// Unwrap returns the wrapped error.
+func (t wrappedExportError) Unwrap() error {
+	return t.wrap
+}
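A quick illustration of how this wrapper plays with the standard errors package; a sketch built directly on the code above:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/exporters/otlp/internal"
)

func main() {
	err := internal.WrapTracesError(context.DeadlineExceeded)
	fmt.Println(err)                                      // traces export: context deadline exceeded
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true, thanks to Unwrap
}
```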
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
new file mode 100644
index 000000000..6e9cc0366
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -0,0 +1,51 @@
+# OpenTelemetry-Go OTLP Span Exporter
+
+[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
+
+[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation.
+
+## Installation
+
+```
+go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
+```
+
+## Examples
+
+- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go)
+- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector)
+
+## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
+
+The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
+This exporter is configured using a client satisfying the `otlptrace.Client` interface.
+This client handles the transformation of data into wire format and the transmission of that data to the collector.
+
+## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
+
+The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.
+
+## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
+
+The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.
+
+## Configuration
+
+### Environment Variables
+
+The following environment variables can be used (instead of options objects) to
+override the default configuration. For more information about how each of
+these environment variables is interpreted, see [the OpenTelemetry
+specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md).
+
+| Environment variable                                                     | Option                        | Default value                                            |
+| ------------------------------------------------------------------------ | ----------------------------- | -------------------------------------------------------- |
+| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`       | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] |
+| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig`         |                                                          |
+| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS`         | `WithHeaders`                 |                                                          |
+| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression`             |                                                          |
+| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`         | `WithTimeout`                 | `10s`                                                    |
+
+[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client to `https://localhost:4318`.
+
+Configuration using options takes precedence over the environment variables.
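As a sketch, wiring the gRPC client into an SDK tracer provider might look like the following; the endpoint and the use of `WithInsecure` are assumptions for a local collector:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"), // assumed local collector
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}

	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	// tp.Tracer("example") now produces spans that are exported over gRPC.
}
```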
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
new file mode 100644
index 000000000..dbb40cf58
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+	"context"
+
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Client manages connections to the collector, handles the
+// transformation of data into wire format, and the transmission of that
+// data to the collector.
+type Client interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Start should establish connection(s) to endpoint(s). It is
+	// called just once by the exporter, so the implementation
+	// does not need to worry about idempotence and locking.
+	Start(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Stop should close the connections. The function is called
+	// only once by the exporter, so the implementation does not
+	// need to worry about idempotence, but it may be called
+	// concurrently with UploadTraces, so proper
+	// locking is required. The function serves as a
+	// synchronization point - after the function returns, the
+	// process of closing connections is assumed to be finished.
+	Stop(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// UploadTraces should transform the passed traces to the wire
+	// format and send it to the collector. May be called
+	// concurrently.
+	UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
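To make the contract concrete, a hedged sketch of a minimal implementation (`stubClient` is an invented name, e.g. for tests):

```go
package stub

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// stubClient records how many ResourceSpans it was asked to upload.
// Start and Stop are each called once by the exporter; UploadTraces
// may be called concurrently, which this sketch does not guard against.
type stubClient struct {
	uploaded int
}

var _ otlptrace.Client = (*stubClient)(nil)

func (c *stubClient) Start(ctx context.Context) error { return nil }
func (c *stubClient) Stop(ctx context.Context) error  { return nil }
func (c *stubClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
	c.uploaded += len(protoSpans) // sketch: a real client must be concurrency-safe
	return nil
}
```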
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
new file mode 100644
index 000000000..b65802edb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+	"context"
+	"errors"
+	"sync"
+
+	"go.opentelemetry.io/otel/exporters/otlp/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var (
+	errAlreadyStarted = errors.New("already started")
+)
+
+// Exporter exports trace data in the OTLP wire format.
+type Exporter struct {
+	client Client
+
+	mu      sync.RWMutex
+	started bool
+
+	startOnce sync.Once
+	stopOnce  sync.Once
+}
+
+// ExportSpans exports a batch of spans.
+func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
+	protoSpans := tracetransform.Spans(ss)
+	if len(protoSpans) == 0 {
+		return nil
+	}
+
+	err := e.client.UploadTraces(ctx, protoSpans)
+	if err != nil {
+		return internal.WrapTracesError(err)
+	}
+	return nil
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+	var err = errAlreadyStarted
+	e.startOnce.Do(func() {
+		e.mu.Lock()
+		e.started = true
+		e.mu.Unlock()
+		err = e.client.Start(ctx)
+	})
+
+	return err
+}
+
+// Shutdown flushes all exports and closes all connections to the receiving endpoint.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	e.mu.RLock()
+	started := e.started
+	e.mu.RUnlock()
+
+	if !started {
+		return nil
+	}
+
+	var err error
+
+	e.stopOnce.Do(func() {
+		err = e.client.Stop(ctx)
+		e.mu.Lock()
+		e.started = false
+		e.mu.Unlock()
+	})
+
+	return err
+}
+
+var _ tracesdk.SpanExporter = (*Exporter)(nil)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, client Client) (*Exporter, error) {
+	exp := NewUnstarted(client)
+	if err := exp.Start(ctx); err != nil {
+		return nil, err
+	}
+	return exp, nil
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(client Client) *Exporter {
+	return &Exporter{
+		client: client,
+	}
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+func (e *Exporter) MarshalLog() interface{} {
+	return struct {
+		Type   string
+		Client Client
+	}{
+		Type:   "otlptrace",
+		Client: e.client,
+	}
+}
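A sketch of the resulting lifecycle using the public HTTP client as the Client: Start runs at most once and a second call reports the error, while an empty batch short-circuits before any upload.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()
	exp := otlptrace.NewUnstarted(otlptracehttp.NewClient())

	if err := exp.Start(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println(exp.Start(ctx)) // "already started": startOnce has run

	_ = exp.ExportSpans(ctx, nil) // empty batch returns nil without an upload

	if err := exp.Shutdown(ctx); err != nil {
		log.Fatal(err)
	}
}
```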
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
new file mode 100644
index 000000000..62c5029db
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default environment options reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+	GetEnv:    os.Getenv,
+	ReadFile:  os.ReadFile,
+	Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+	return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+	opts := getOptionsFromEnv()
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+	opts := []GenericOption{}
+
+	tlsConf := &tls.Config{}
+	DefaultEnvOptionsReader.Apply(
+		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Traces.Endpoint = u.Host
+				// For OTLP/HTTP endpoint URLs without a per-signal
+				// configuration, the passed endpoint is used as a base URL
+				// and the signals are sent to these paths relative to that.
+				cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+			opts = append(opts, withEndpointScheme(u))
+			opts = append(opts, newSplitOption(func(cfg Config) Config {
+				cfg.Traces.Endpoint = u.Host
+				// For endpoint URLs for OTLP/HTTP per-signal variables, the
+				// URL MUST be used as-is without any modification. The only
+				// exception is that if an URL contains no path part, the root
+				// path / MUST be used.
+				path := u.Path
+				if path == "" {
+					path = "/"
+				}
+				cfg.Traces.URLPath = path
+				return cfg
+			}, withEndpointForGRPC(u)))
+		}),
+		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+		envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+	)
+
+	return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+	switch strings.ToLower(u.Scheme) {
+	case "http", "unix":
+		return WithInsecure()
+	default:
+		return WithSecure()
+	}
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+	return func(cfg Config) Config {
+		// For OTLP/gRPC endpoints, this is the target to which the
+		// exporter is going to send telemetry.
+		cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+		return cfg
+	}
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			cp := NoCompression
+			if v == "gzip" {
+				cp = GzipCompression
+			}
+
+			fn(cp)
+		}
+	}
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+	if b {
+		return WithInsecure()
+	}
+	return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+	return func(e *envconfig.EnvOptionsReader) {
+		if c.RootCAs != nil || len(c.Certificates) > 0 {
+			fn(c)
+		}
+	}
+}
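The two endpoint rules above differ subtly. A standalone sketch of the path handling they implement, using hypothetical URLs:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// OTEL_EXPORTER_OTLP_ENDPOINT is a base URL: the signal path is
	// appended relative to it.
	base, _ := url.Parse("https://collector.example:4318/otlp")
	fmt.Println(path.Join(base.Path, "/v1/traces")) // /otlp/v1/traces

	// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT is used as-is; only an empty
	// path is normalized to "/".
	perSignal, _ := url.Parse("https://collector.example:4318")
	p := perSignal.Path
	if p == "" {
		p = "/"
	}
	fmt.Println(p) // /
}
```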
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
new file mode 100644
index 000000000..c48ffd530
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go
@@ -0,0 +1,308 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
+)
+
+const (
+	// DefaultTracesPath is a default URL path for endpoint that
+	// receives spans.
+	DefaultTracesPath string = "/v1/traces"
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Traces SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// NewHTTPConfig returns a new Config with all settings from opts applied
+// and default HTTP config values for any setting left unset.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+	return cfg
+}
+
+// NewGRPCConfig returns a new Config with all settings from opts applied
+// and default gRPC config values for any setting left unset.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+		DialOptions: []grpc.DialOption{grpc.WithUserAgent(internal.GetUserAgentHeader())},
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Traces.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+	} else if cfg.Traces.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Traces.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Traces.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so that future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so that future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so that future additions to it will not
+		// violate compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+	httpFn func(Config) Config
+	grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+	return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+	return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+	fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+	return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+	fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+	return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+	return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+func WithEndpoint(endpoint string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Endpoint = endpoint
+		return cfg
+	})
+}
+
+func WithCompression(compression Compression) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Compression = compression
+		return cfg
+	})
+}
+
+func WithURLPath(urlPath string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.URLPath = urlPath
+		return cfg
+	})
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.RetryConfig = rc
+		return cfg
+	})
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+	return newSplitOption(func(cfg Config) Config {
+		cfg.Traces.TLSCfg = tlsCfg.Clone()
+		return cfg
+	}, func(cfg Config) Config {
+		cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+		return cfg
+	})
+}
+
+func WithInsecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Insecure = true
+		return cfg
+	})
+}
+
+func WithSecure() GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Insecure = false
+		return cfg
+	})
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Headers = headers
+		return cfg
+	})
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+	return newGenericOption(func(cfg Config) Config {
+		cfg.Traces.Timeout = duration
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go
new file mode 100644
index 000000000..c2d6c0361
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt
+	// to connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+	// MarshalProto tells the driver to send using the protobuf binary format.
+	MarshalProto Marshaler = iota
+	// MarshalJSON tells the driver to send using json format.
+	MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go
new file mode 100644
index 000000000..7287cf6cf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+)
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// used to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
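A short usage sketch; the CA file path is hypothetical, and the internal import only resolves within the module:

```go
package main

import (
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
)

func main() {
	pemBytes, err := os.ReadFile("/etc/ssl/collector-ca.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	tlsCfg, err := otlpconfig.CreateTLSConfig(pemBytes)
	if err != nil {
		log.Fatal(err) // e.g. the bytes were not valid PEM certificates
	}
	_ = tlsCfg // would be passed on via WithTLSClientConfig
}
```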
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
new file mode 100644
index 000000000..ec74f1aad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -0,0 +1,158 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/resource"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+	if len(attrs) == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, len(attrs))
+	for _, kv := range attrs {
+		out = append(out, KeyValue(kv))
+	}
+	return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+	l := iter.Len()
+	if l == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, l)
+	for iter.Next() {
+		out = append(out, KeyValue(iter.Attribute()))
+	}
+	return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+	return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+	return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+	av := new(commonpb.AnyValue)
+	switch v.Type() {
+	case attribute.BOOL:
+		av.Value = &commonpb.AnyValue_BoolValue{
+			BoolValue: v.AsBool(),
+		}
+	case attribute.BOOLSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: boolSliceValues(v.AsBoolSlice()),
+			},
+		}
+	case attribute.INT64:
+		av.Value = &commonpb.AnyValue_IntValue{
+			IntValue: v.AsInt64(),
+		}
+	case attribute.INT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: int64SliceValues(v.AsInt64Slice()),
+			},
+		}
+	case attribute.FLOAT64:
+		av.Value = &commonpb.AnyValue_DoubleValue{
+			DoubleValue: v.AsFloat64(),
+		}
+	case attribute.FLOAT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: float64SliceValues(v.AsFloat64Slice()),
+			},
+		}
+	case attribute.STRING:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: v.AsString(),
+		}
+	case attribute.STRINGSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: stringSliceValues(v.AsStringSlice()),
+			},
+		}
+	default:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: "INVALID",
+		}
+	}
+	return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_BoolValue{
+				BoolValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_IntValue{
+				IntValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_DoubleValue{
+				DoubleValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_StringValue{
+				StringValue: v,
+			},
+		}
+	}
+	return converted
+}
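To make the mapping concrete, a sketch of one transformation (tracetransform is internal to the module, so this only compiles from within it):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
)

func main() {
	kv := tracetransform.KeyValue(attribute.StringSlice("features", []string{"a", "b"}))
	// The STRINGSLICE case above yields an ArrayValue of string AnyValues.
	fmt.Println(kv.Key)                                  // features
	fmt.Println(kv.Value.GetArrayValue().GetValues()[0]) // roughly: string_value:"a"
}
```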
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
new file mode 100644
index 000000000..7aaec38d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
+	if il == (instrumentation.Scope{}) {
+		return nil
+	}
+	return &commonpb.InstrumentationScope{
+		Name:    il.Name,
+		Version: il.Version,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
new file mode 100644
index 000000000..05a1f78ad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/sdk/resource"
+	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// Resource transforms a Resource into an OTLP Resource.
+func Resource(r *resource.Resource) *resourcepb.Resource {
+	if r == nil {
+		return nil
+	}
+	return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
new file mode 100644
index 000000000..b83cbd724
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -0,0 +1,205 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
+// ResourceSpans.
+func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
+	if len(sdl) == 0 {
+		return nil
+	}
+
+	rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
+
+	type key struct {
+		r  attribute.Distinct
+		is instrumentation.Scope
+	}
+	ssm := make(map[key]*tracepb.ScopeSpans)
+
+	var resources int
+	for _, sd := range sdl {
+		if sd == nil {
+			continue
+		}
+
+		rKey := sd.Resource().Equivalent()
+		k := key{
+			r:  rKey,
+			is: sd.InstrumentationScope(),
+		}
+		scopeSpan, iOk := ssm[k]
+		if !iOk {
+			// Either the resource or the instrumentation scope was unknown.
+			scopeSpan = &tracepb.ScopeSpans{
+				Scope:     InstrumentationScope(sd.InstrumentationScope()),
+				Spans:     []*tracepb.Span{},
+				SchemaUrl: sd.InstrumentationScope().SchemaURL,
+			}
+		}
+		scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+		ssm[k] = scopeSpan
+
+		rs, rOk := rsm[rKey]
+		if !rOk {
+			resources++
+			// The resource was unknown.
+			rs = &tracepb.ResourceSpans{
+				Resource:   Resource(sd.Resource()),
+				ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+				SchemaUrl:  sd.Resource().SchemaURL(),
+			}
+			rsm[rKey] = rs
+			continue
+		}
+
+		// The resource has been seen before. If the instrumentation scope
+		// was unknown, its ScopeSpans still needs to be added to this
+		// ResourceSpans. Otherwise, the scope has already been seen and the
+		// span appended above is already included via the existing
+		// ScopeSpans reference.
+		if !iOk {
+			rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+		}
+	}
+
+	// Transform the categorized map into a slice
+	rss := make([]*tracepb.ResourceSpans, 0, resources)
+	for _, rs := range rsm {
+		rss = append(rss, rs)
+	}
+	return rss
+}
+
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+	if sd == nil {
+		return nil
+	}
+
+	tid := sd.SpanContext().TraceID()
+	sid := sd.SpanContext().SpanID()
+
+	s := &tracepb.Span{
+		TraceId:                tid[:],
+		SpanId:                 sid[:],
+		TraceState:             sd.SpanContext().TraceState().String(),
+		Status:                 status(sd.Status().Code, sd.Status().Description),
+		StartTimeUnixNano:      uint64(sd.StartTime().UnixNano()),
+		EndTimeUnixNano:        uint64(sd.EndTime().UnixNano()),
+		Links:                  links(sd.Links()),
+		Kind:                   spanKind(sd.SpanKind()),
+		Name:                   sd.Name(),
+		Attributes:             KeyValues(sd.Attributes()),
+		Events:                 spanEvents(sd.Events()),
+		DroppedAttributesCount: uint32(sd.DroppedAttributes()),
+		DroppedEventsCount:     uint32(sd.DroppedEvents()),
+		DroppedLinksCount:      uint32(sd.DroppedLinks()),
+	}
+
+	if psid := sd.Parent().SpanID(); psid.IsValid() {
+		s.ParentSpanId = psid[:]
+	}
+
+	return s
+}
+
+// status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+	var c tracepb.Status_StatusCode
+	switch status {
+	case codes.Ok:
+		c = tracepb.Status_STATUS_CODE_OK
+	case codes.Error:
+		c = tracepb.Status_STATUS_CODE_ERROR
+	default:
+		c = tracepb.Status_STATUS_CODE_UNSET
+	}
+	return &tracepb.Status{
+		Code:    c,
+		Message: message,
+	}
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+	if len(links) == 0 {
+		return nil
+	}
+
+	sl := make([]*tracepb.Span_Link, 0, len(links))
+	for _, otLink := range links {
+		// This redefinition is necessary to prevent otLink.*ID[:] copies
+		// being reused -- in short we need a new otLink per iteration.
+		otLink := otLink
+
+		tid := otLink.SpanContext.TraceID()
+		sid := otLink.SpanContext.SpanID()
+
+		sl = append(sl, &tracepb.Span_Link{
+			TraceId:                tid[:],
+			SpanId:                 sid[:],
+			Attributes:             KeyValues(otLink.Attributes),
+			DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+		})
+	}
+	return sl
+}
+
+// spanEvents transforms span Events into OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
+	if len(es) == 0 {
+		return nil
+	}
+
+	events := make([]*tracepb.Span_Event, len(es))
+	// Transform message events
+	for i := 0; i < len(es); i++ {
+		events[i] = &tracepb.Span_Event{
+			Name:                   es[i].Name,
+			TimeUnixNano:           uint64(es[i].Time.UnixNano()),
+			Attributes:             KeyValues(es[i].Attributes),
+			DroppedAttributesCount: uint32(es[i].DroppedAttributeCount),
+		}
+	}
+	return events
+}
+
+// spanKind transforms a SpanKind to an OTLP span kind.
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
+	switch kind {
+	case trace.SpanKindInternal:
+		return tracepb.Span_SPAN_KIND_INTERNAL
+	case trace.SpanKindClient:
+		return tracepb.Span_SPAN_KIND_CLIENT
+	case trace.SpanKindServer:
+		return tracepb.Span_SPAN_KIND_SERVER
+	case trace.SpanKindProducer:
+		return tracepb.Span_SPAN_KIND_PRODUCER
+	case trace.SpanKindConsumer:
+		return tracepb.Span_SPAN_KIND_CONSUMER
+	default:
+		return tracepb.Span_SPAN_KIND_UNSPECIFIED
+	}
+}
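A sketch demonstrating the grouping Spans performs: spans recorded under the same resource and instrumentation scope collapse into a single ResourceSpans holding a single ScopeSpans. tracetest ships with the SDK; the internal tracetransform import only resolves within the module.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	rec := tracetest.NewInMemoryExporter()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(rec))
	tr := tp.Tracer("example")

	for _, name := range []string{"op-a", "op-b"} {
		_, s := tr.Start(context.Background(), name)
		s.End()
	}

	rss := tracetransform.Spans(rec.GetSpans().Snapshots())
	// Same resource and scope for both spans: one ResourceSpans, one
	// ScopeSpans, two Spans.
	fmt.Println(len(rss), len(rss[0].ScopeSpans), len(rss[0].ScopeSpans[0].Spans)) // 1 1 2
}
```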
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
new file mode 100644
index 000000000..fe23f8e37
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+type client struct {
+	endpoint      string
+	dialOpts      []grpc.DialOption
+	metadata      metadata.MD
+	exportTimeout time.Duration
+	requestFunc   retry.RequestFunc
+
+	// stopCtx is used as a parent context for all exports. Therefore, when it
+	// is canceled with the stopFunc all exports are canceled.
+	stopCtx context.Context
+	// stopFunc cancels stopCtx, stopping any active exports.
+	stopFunc context.CancelFunc
+
+	// ourConn keeps track of where conn was created: true if created here on
+	// Start, or false if passed with an option. This is important on Shutdown
+	// as the conn should only be closed if created here on start. Otherwise,
+	// it is up to the processes that passed the conn to close it.
+	ourConn bool
+	conn    *grpc.ClientConn
+	tscMu   sync.RWMutex
+	tsc     coltracepb.TraceServiceClient
+}
+
+// Compile-time check that *client implements otlptrace.Client.
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new gRPC trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+	return newClient(opts...)
+}
+
+func newClient(opts ...Option) *client {
+	cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	c := &client{
+		endpoint:      cfg.Traces.Endpoint,
+		exportTimeout: cfg.Traces.Timeout,
+		requestFunc:   cfg.RetryConfig.RequestFunc(retryable),
+		dialOpts:      cfg.DialOptions,
+		stopCtx:       ctx,
+		stopFunc:      cancel,
+		conn:          cfg.GRPCConn,
+	}
+
+	if len(cfg.Traces.Headers) > 0 {
+		c.metadata = metadata.New(cfg.Traces.Headers)
+	}
+
+	return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *client) Start(ctx context.Context) error {
+	if c.conn == nil {
+		// If the caller did not provide a ClientConn when the client was
+		// created, create one using the configuration they did provide.
+		conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+		if err != nil {
+			return err
+		}
+		// Keep track that we own the lifecycle of this conn and need to close
+		// it on Shutdown.
+		c.ourConn = true
+		c.conn = conn
+	}
+
+	// The otlptrace.Client interface states this method is called just once,
+	// so no need to check if already started.
+	c.tscMu.Lock()
+	c.tsc = coltracepb.NewTraceServiceClient(c.conn)
+	c.tscMu.Unlock()
+
+	return nil
+}
+
+var errAlreadyStopped = errors.New("the client is already stopped")
+
+// Stop shuts down the client.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadTraces method of the client. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All client held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+	// Acquire the c.tscMu lock within the ctx lifetime.
+	acquired := make(chan struct{})
+	go func() {
+		c.tscMu.Lock()
+		close(acquired)
+	}()
+	var err error
+	select {
+	case <-ctx.Done():
+		// The Stop timeout is reached. Kill any remaining exports to force
+		// the release of the lock, and save the timeout error to return,
+		// signaling that the shutdown timed out before cleanly stopping.
+		c.stopFunc()
+		err = ctx.Err()
+
+		// To ensure the client is not left in a dirty state, c.tsc needs to be
+		// set to nil. To avoid the race condition when doing this, ensure
+		// that all the exports are killed (initiated by c.stopFunc).
+		<-acquired
+	case <-acquired:
+	}
+	// Hold the tscMu lock for the rest of the function to ensure no new
+	// exports are started.
+	defer c.tscMu.Unlock()
+
+	// The otlptrace.Client interface states this method is called only
+	// once, but there is no guarantee it is called after Start. Ensure the
+	// client is started before doing anything and let the caller know if they
+	// made a mistake.
+	if c.tsc == nil {
+		return errAlreadyStopped
+	}
+
+	// Clear c.tsc to signal the client is stopped.
+	c.tsc = nil
+
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	// Hold a read lock to ensure a shut down initiated after this starts does
+	// not abandon the export. This read lock acquire has lower priority than
+	// a write lock acquire (i.e. Stop), meaning if the client is shutting
+	// down this will come after the shut down.
+	c.tscMu.RLock()
+	defer c.tscMu.RUnlock()
+
+	if c.tsc == nil {
+		return errShutdown
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+			ResourceSpans: protoSpans,
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedSpans()
+			if n != 0 || msg != "" {
+				err := internal.TracePartialSuccessError(n, msg)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	// Unify the client stopCtx with the parent.
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-c.stopCtx.Done():
+			// Cancel the export as the shutdown has timed out.
+			cancel()
+		}
+	}()
+
+	return ctx, cancel
+}
+
+// retryable returns whether err identifies a request that can be retried and
+// a duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.ResourceExhausted,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		return true, throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns a duration to wait for if an explicit throttle time
+// is included in the response status.
+func throttleDelay(s *status.Status) time.Duration {
+	for _, detail := range s.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return t.RetryDelay.AsDuration()
+		}
+	}
+	return 0
+}
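+
+// exampleRetryClassification is an editor-added illustrative sketch, not part
+// of the upstream file: codes.Unavailable is in the retryable set above,
+// while the returned delay is zero unless the status carried a RetryInfo
+// detail that throttleDelay could extract.
+func exampleRetryClassification() (bool, time.Duration) {
+	// A gRPC Unavailable status, as a collector might return when overloaded.
+	return retryable(status.Error(codes.Unavailable, "collector overloaded"))
+}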
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+	}{
+		Type:     "otlptracegrpc",
+		Endpoint: c.endpoint,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
new file mode 100644
index 000000000..89af41002
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+	return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+	return otlptrace.NewUnstarted(NewClient(opts...))
+}
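+
+// exampleNewExporter is an editor-added illustrative sketch, not part of the
+// upstream file: typical construction of a started exporter against a local
+// collector with transport security disabled. The endpoint value is the
+// documented default and purely illustrative.
+func exampleNewExporter(ctx context.Context) (*otlptrace.Exporter, error) {
+	return New(ctx,
+		WithEndpoint("localhost:4317"),
+		WithInsecure(),
+	)
+}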
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
new file mode 100644
index 000000000..3d09ce590
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+	applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+	converted := make([]otlpconfig.GRPCOption, len(opts))
+	for i, o := range opts {
+		converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying export of span batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
+// default, client security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the exporter will connect to. If
+// unset, localhost:4317 will be used as a default.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.ReconnectionPeriod = rp
+		return cfg
+	})}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+	if compressor == "gzip" {
+		return otlpconfig.GzipCompression
+	}
+
+	otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+	return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. It is the responsibility of the caller to ensure that the
+// compressor set has been registered with google.golang.org/grpc/encoding.
+// This can be done by encoding.RegisterCompressor. Some compressors
+// auto-register on import, such as gzip, which can be registered by calling
+// `import _ "google.golang.org/grpc/encoding/gzip"`.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+	return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes grpc.TransportCredentials instead of, say,
+// a certificate file or a tls.Certificate, because these credentials can be
+// retrieved in many ways, e.g. from a plain file, an in-code tls.Config, or
+// by certificate rotation, so it is up to the caller to decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.Traces.GRPCCredentials = creds
+		return cfg
+	})}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.ServiceConfig = serviceConfig
+		return cfg
+	})}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions,
+// so they will take precedence over any internal grpc.DialOptions they might
+// conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.DialOptions = opts
+		return cfg
+	})}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The client
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+	return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+		cfg.GRPCConn = conn
+		return cfg
+	})}
+}
+
+// WithTimeout sets the max amount of time a client will attempt to export a
+// batch of spans. This takes precedence over any retry settings defined with
+// WithRetry; once this time limit has been reached, the export is abandoned
+// and the batch of spans is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of spans.
+//
+// If the target endpoint responds with not only a retryable error, but also
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase the interval
+// exponentially after each error, for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
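+
+// exampleRetryOption is an editor-added illustrative sketch, not part of the
+// upstream file: an explicit retry policy roughly matching the documented
+// defaults, assuming the retry.Config fields Enabled, InitialInterval,
+// MaxInterval, and MaxElapsedTime.
+func exampleRetryOption() Option {
+	return WithRetry(RetryConfig{
+		Enabled:         true,
+		InitialInterval: 5 * time.Second,
+		MaxInterval:     30 * time.Second,
+		MaxElapsedTime:  time.Minute,
+	})
+}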
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
new file mode 100644
index 000000000..9a58fb1d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+top_dir='.'
+if [[ $# -gt 0 ]]; then
+    top_dir="${1}"
+fi
+
+p=$(pwd)
+mod_dirs=()
+
+# Note `mapfile` does not exist in older bash versions:
+# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
+
+while IFS= read -r line; do
+    mod_dirs+=("$line")
+done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+for mod_dir in "${mod_dirs[@]}"; do
+    cd "${mod_dir}"
+
+    while IFS= read -r line; do
+        echo ".${line#${p}}"
+    done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
+    cd "${p}"
+done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 000000000..ecd363ab5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+	"unsafe"
+)
+
+var (
+	// globalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (`SetErrorHandler`), all
+	// calls to `Handle` will be delegated to the registered ErrorHandler.
+	globalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*delegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*errLogger)(nil)
+)
+
+type delegator struct {
+	delegate unsafe.Pointer
+}
+
+func (d *delegator) Handle(err error) {
+	d.getDelegate().Handle(err)
+}
+
+func (d *delegator) getDelegate() ErrorHandler {
+	return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *delegator) setDelegate(eh ErrorHandler) {
+	atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
+}
+
+func defaultErrorHandler() *delegator {
+	d := &delegator{}
+	d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
+	return d
+}
+
+// errLogger logs errors if no delegate is set, otherwise they are delegated.
+type errLogger struct {
+	l *log.Logger
+}
+
+// Handle logs err if no delegate is set, otherwise it is delegated.
+func (h *errLogger) Handle(err error) {
+	h.l.Print(err)
+}
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+	return globalErrorHandler
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+	globalErrorHandler.setDelegate(h)
+}
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) {
+	GetErrorHandler().Handle(err)
+}
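+
+// exampleIgnoreHandler is an editor-added illustrative sketch, not part of
+// the upstream file: a custom ErrorHandler that silently drops errors. Once
+// installed with SetErrorHandler, handlers previously returned by
+// GetErrorHandler delegate to it as documented above.
+type exampleIgnoreHandler struct{}
+
+func (exampleIgnoreHandler) Handle(error) {}
+
+func exampleInstallHandler() {
+	SetErrorHandler(exampleIgnoreHandler{})
+}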
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 000000000..622c3ee3f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,111 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provides several helper functions for commonly used logic
+of processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
+	return cp.Elem().Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero string
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+}
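+
+// exampleStringRoundTrip is an editor-added illustrative sketch, not part of
+// the upstream file: a slice becomes a comparable array value and converts
+// back to an equal slice.
+func exampleStringRoundTrip() []string {
+	arr := StringSliceValue([]string{"a", "b"}) // [2]string{"a", "b"} as interface{}
+	return AsStringSlice(arr)                   // []string{"a", "b"}
+}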
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 000000000..b96e5408e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides base types and functionality to store and retrieve
+baggage in Go context. This package exists because the OpenTracing bridge to
+OpenTelemetry needs to synchronize state whenever baggage for a context is
+modified and that context contains an OpenTracing span. If it were not for
+this need this package would not need to exist and the
+`go.opentelemetry.io/otel/baggage` package would be the singular place where
+W3C baggage is handled.
+*/
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+// List is the collection of baggage members. The W3C allows for duplicates,
+// but OpenTelemetry does not; therefore, this is represented as a map.
+type List map[string]Item
+
+// Item is the value and metadata properties part of a list-member.
+type Item struct {
+	Value      string
+	Properties []Property
+}
+
+// Property is a metadata entry for a list-member.
+type Property struct {
+	Key, Value string
+
+	// HasValue indicates whether the property has a value: it distinguishes
+	// an unset value from one explicitly set to the zero-value.
+	HasValue bool
+}
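+
+// exampleList is an editor-added illustrative sketch, not part of the
+// upstream file: the W3C list-member `userID=42;priority` expressed in
+// these types, where the valueless `priority` property has HasValue false.
+var exampleList = List{
+	"userID": {
+		Value:      "42",
+		Properties: []Property{{Key: "priority", HasValue: false}},
+	},
+}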
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
new file mode 100644
index 000000000..4469700d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+import "context"
+
+type baggageContextKeyType int
+
+const baggageKey baggageContextKeyType = iota
+
+// SetHookFunc is a callback called when storing baggage in the context.
+type SetHookFunc func(context.Context, List) context.Context
+
+// GetHookFunc is a callback called when getting baggage from the context.
+type GetHookFunc func(context.Context, List) List
+
+type baggageState struct {
+	list List
+
+	setHook SetHookFunc
+	getHook GetHookFunc
+}
+
+// ContextWithSetHook returns a copy of parent with hook configured to be
+// invoked every time ContextWithBaggage is called.
+//
+// Passing nil SetHookFunc creates a context with no set hook to call.
+func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
+	var s baggageState
+	if v, ok := parent.Value(baggageKey).(baggageState); ok {
+		s = v
+	}
+
+	s.setHook = hook
+	return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithGetHook returns a copy of parent with hook configured to be
+// invoked every time FromContext is called.
+//
+// Passing nil GetHookFunc creates a context with no get hook to call.
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
+	var s baggageState
+	if v, ok := parent.Value(baggageKey).(baggageState); ok {
+		s = v
+	}
+
+	s.getHook = hook
+	return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithList returns a copy of parent with baggage. Passing nil list
+// returns a context without any baggage.
+func ContextWithList(parent context.Context, list List) context.Context {
+	var s baggageState
+	if v, ok := parent.Value(baggageKey).(baggageState); ok {
+		s = v
+	}
+
+	s.list = list
+	ctx := context.WithValue(parent, baggageKey, s)
+	if s.setHook != nil {
+		ctx = s.setHook(ctx, list)
+	}
+
+	return ctx
+}
+
+// ListFromContext returns the baggage contained in ctx.
+func ListFromContext(ctx context.Context) List {
+	switch v := ctx.Value(baggageKey).(type) {
+	case baggageState:
+		if v.getHook != nil {
+			return v.getHook(ctx, v.list)
+		}
+		return v.list
+	default:
+		return nil
+	}
+}
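+
+// exampleListRoundTrip is an editor-added illustrative sketch, not part of
+// the upstream file: storing then retrieving a List; any configured set and
+// get hooks fire on exactly these two calls.
+func exampleListRoundTrip(ctx context.Context, l List) List {
+	ctx = ContextWithList(ctx, l)
+	return ListFromContext(ctx)
+}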
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 000000000..293c08961
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel API and SDK to
+// provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger unsafe.Pointer
+
+func init() {
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Info messages use a logger with `l.V(1).Enabled() == true`.
+// To see Debug messages use a logger with `l.V(5).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+	atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
+}
+
+func getLogger() logr.Logger {
+	return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be fewer than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+	getLogger().V(1).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+	getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+	getLogger().V(5).Info(msg, keysAndValues...)
+}
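+
+// exampleVerboseLogger is an editor-added illustrative sketch, not part of
+// the upstream file: raising stdr's verbosity to 5 makes both Info (V(1))
+// and Debug (V(5)) messages visible through the global logger.
+func exampleVerboseLogger() {
+	stdr.SetVerbosity(5)
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags)))
+}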
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..06bac35c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it delegates the calls to
+// the default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject injects cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+	return p.effectiveDelegate().Fields()
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 000000000..1ad38f828
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,115 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type (
+	tracerProviderHolder struct {
+		tp trace.TracerProvider
+	}
+
+	propagatorsHolder struct {
+		tm propagation.TextMapPropagator
+	}
+)
+
+var (
+	globalTracer      = defaultTracerValue()
+	globalPropagators = defaultPropagatorsValue()
+
+	delegateTraceOnce             sync.Once
+	delegateTextMapPropagatorOnce sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+	return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	current := TracerProvider()
+
+	if _, cOk := current.(*tracerProvider); cOk {
+		if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+			// Do not assign the default delegating TracerProvider to delegate
+			// to itself.
+			Error(
+				errors.New("no delegate configured in tracer provider"),
+				"Setting tracer provider to it's current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateTraceOnce.Do(func() {
+		if def, ok := current.(*tracerProvider); ok {
+			def.setDelegate(tp)
+		}
+	})
+	globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+	return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+	current := TextMapPropagator()
+
+	if _, cOk := current.(*textMapPropagator); cOk {
+		if _, pOk := p.(*textMapPropagator); pOk && current == p {
+			// Do not assign the default delegating TextMapPropagator to
+			// delegate to itself.
+			Error(
+				errors.New("no delegate configured in text map propagator"),
+				"Setting text map propagator to it's current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	// For the textMapPropagator already returned by TextMapPropagator
+	// delegate to p.
+	delegateTextMapPropagatorOnce.Do(func() {
+		if def, ok := current.(*textMapPropagator); ok {
+			def.SetDelegate(p)
+		}
+	})
+	// Return p when subsequent calls to TextMapPropagator are made.
+	globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+	return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+	return v
+}
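+
+// exampleInstallProvider is an editor-added illustrative sketch, not part of
+// the upstream file: installing a concrete provider delegates the default
+// tracerProvider to it, and TracerProvider then returns tp directly.
+func exampleInstallProvider(tp trace.TracerProvider) trace.TracerProvider {
+	SetTracerProvider(tp)
+	return TracerProvider()
+}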
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 000000000..5f008d098
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,192 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior; it remains a
+no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+	mtx      sync.Mutex
+	tracers  map[il]*tracer
+	delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.tracers) == 0 {
+		return
+	}
+
+	for _, t := range p.tracers {
+		t.setDelegate(provider)
+	}
+
+	p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Tracer(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the tracer in the tracers map.
+
+	c := trace.NewTracerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.tracers == nil {
+		p.tracers = make(map[il]*tracer)
+	}
+
+	if val, ok := p.tracers[key]; ok {
+		return val
+	}
+
+	t := &tracer{name: name, opts: opts, provider: p}
+	p.tracers[key] = t
+	return t
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+	name     string
+	opts     []trace.TracerOption
+	provider *tracerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+	t.delegate.Store(provider.Tracer(t.name, t.opts...))
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set, otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	delegate := t.delegate.Load()
+	if delegate != nil {
+		return delegate.(trace.Tracer).Start(ctx, name, opts...)
+	}
+
+	s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
+	ctx = trace.ContextWithSpan(ctx, s)
+	return ctx, s
+}
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+	sc     trace.SpanContext
+	tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
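+// TracerProvider returns the provider of the Tracer that created this span.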
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
new file mode 100644
index 000000000..e07e79400
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+import (
+	"math"
+	"unsafe"
+)
+
+func BoolToRaw(b bool) uint64 { // nolint:revive  // b is not a control flag.
+	if b {
+		return 1
+	}
+	return 0
+}
+
+func RawToBool(r uint64) bool {
+	return r != 0
+}
+
+func Int64ToRaw(i int64) uint64 {
+	return uint64(i)
+}
+
+func RawToInt64(r uint64) int64 {
+	return int64(r)
+}
+
+func Float64ToRaw(f float64) uint64 {
+	return math.Float64bits(f)
+}
+
+func RawToFloat64(r uint64) float64 {
+	return math.Float64frombits(r)
+}
+
+func RawPtrToFloat64Ptr(r *uint64) *float64 {
+	return (*float64)(unsafe.Pointer(r))
+}
+
+func RawPtrToInt64Ptr(r *uint64) *int64 {
+	return (*int64)(unsafe.Pointer(r))
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 000000000..c4f8acd5d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"github.com/go-logr/logr"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally by OpenTelemetry.
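+//
+// A minimal usage sketch (illustrative; stdr is one logr implementation
+// vendored with this module, and the flags passed to log.New are arbitrary):
+//
+//	otel.SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags)))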
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 000000000..621e4c5fc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+	instrumentationVersion string
+	schemaURL              string
+}
+
+// InstrumentationVersion is the version of the library providing instrumentation.
+func (cfg MeterConfig) InstrumentationVersion() string {
+	return cfg.instrumentationVersion
+}
+
+// SchemaURL is the schema_url of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+	return cfg.schemaURL
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+	// applyMeter is used to set a MeterOption value of a MeterConfig.
+	applyMeter(MeterConfig) MeterConfig
+}
+
+// NewMeterConfig creates a new MeterConfig and applies
+// all the given options.
+func NewMeterConfig(opts ...MeterOption) MeterConfig {
+	var config MeterConfig
+	for _, o := range opts {
+		config = o.applyMeter(config)
+	}
+	return config
+}
+
+type meterOptionFunc func(MeterConfig) MeterConfig
+
+func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
+	return fn(cfg)
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) MeterOption {
+	return meterOptionFunc(func(config MeterConfig) MeterConfig {
+		config.instrumentationVersion = version
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL.
+func WithSchemaURL(schemaURL string) MeterOption {
+	return meterOptionFunc(func(config MeterConfig) MeterConfig {
+		config.schemaURL = schemaURL
+		return config
+	})
+}
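+
+// An illustrative sketch of combining these options from calling code (the
+// version and schema URL values are arbitrary):
+//
+//	cfg := metric.NewMeterConfig(
+//		metric.WithInstrumentationVersion("v1.2.3"),
+//		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.17.0"),
+//	)
+//	_ = cfg.InstrumentationVersion() // "v1.2.3"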
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
new file mode 100644
index 000000000..bd6f43437
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package metric provides an implementation of the metrics part of the
+OpenTelemetry API.
+
+This package is currently in a pre-GA phase. Backwards incompatible changes
+may be introduced in subsequent minor version releases as we work to track the
+evolving OpenTelemetry specification and user feedback.
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go
new file mode 100644
index 000000000..05a67c2e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/global/global.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/metric/global"
+
+import (
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/internal/global"
+)
+
+// Meter returns a Meter from the global MeterProvider. The
+// instrumentationName must be the name of the library providing
+// instrumentation. This name may be the same as the instrumented code only if
+// that code provides built-in instrumentation. If the instrumentationName is
+// empty, then an implementation-defined default name will be used instead.
+//
+// This is short for MeterProvider().Meter(name).
+func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
+	return MeterProvider().Meter(instrumentationName, opts...)
+}
+
+// MeterProvider returns the registered global meter provider.
+// If none is registered then a no-op MeterProvider is returned.
+func MeterProvider() metric.MeterProvider {
+	return global.MeterProvider()
+}
+
+// SetMeterProvider registers `mp` as the global meter provider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	global.SetMeterProvider(mp)
+}
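+
+// A minimal wiring sketch (illustrative; provider stands in for a configured
+// SDK MeterProvider):
+//
+//	global.SetMeterProvider(provider)
+//	meter := global.Meter("example.com/mylib")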
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go
new file mode 100644
index 000000000..2f624bfe0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go
@@ -0,0 +1,131 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/unit"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Asynchronous
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64ObservableCounter interface{ Float64Observable }
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64ObservableUpDownCounter interface{ Float64Observable }
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64ObservableGauge interface{ Float64Observable }
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Observer interface {
+	Observe(value float64, attributes ...attribute.KeyValue)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for the Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks; it should not report measurements with the same
+// attributes as another Float64Callback registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObserverConfig contains options for Asynchronous instruments that
+// observe float64 values.
+type Float64ObserverConfig struct {
+	description string
+	unit        unit.Unit
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObserverConfig returns a new Float64ObserverConfig with all opts
+// applied.
+func NewFloat64ObserverConfig(opts ...Float64ObserverOption) Float64ObserverConfig {
+	var config Float64ObserverConfig
+	for _, o := range opts {
+		config = o.applyFloat64Observer(config)
+	}
+	return config
+}
+
+// Description returns the Config description.
+func (c Float64ObserverConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the Config unit.
+func (c Float64ObserverConfig) Unit() unit.Unit {
+	return c.unit
+}
+
+// Callbacks returns the Config callbacks.
+func (c Float64ObserverConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObserverOption applies options to float64 Observer instruments.
+type Float64ObserverOption interface {
+	applyFloat64Observer(Float64ObserverConfig) Float64ObserverConfig
+}
+
+type float64ObserverOptionFunc func(Float64ObserverConfig) Float64ObserverConfig
+
+func (fn float64ObserverOptionFunc) applyFloat64Observer(cfg Float64ObserverConfig) Float64ObserverConfig {
+	return fn(cfg)
+}
+
+// WithFloat64Callback adds a callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObserverOption {
+	return float64ObserverOptionFunc(func(cfg Float64ObserverConfig) Float64ObserverConfig {
+		cfg.callbacks = append(cfg.callbacks, callback)
+		return cfg
+	})
+}
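+
+// An illustrative sketch of constructing a callback option (readQueueDepth
+// is a hypothetical helper returning the value to observe):
+//
+//	opt := instrument.WithFloat64Callback(func(ctx context.Context, o instrument.Float64Observer) error {
+//		o.Observe(readQueueDepth())
+//		return nil
+//	})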
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go
new file mode 100644
index 000000000..ac0d09d90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go
@@ -0,0 +1,131 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/unit"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Asynchronous
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64ObservableCounter interface{ Int64Observable }
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64ObservableUpDownCounter interface{ Int64Observable }
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64ObservableGauge interface{ Int64Observable }
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Observer interface {
+	Observe(value int64, attributes ...attribute.KeyValue)
+}
+
+// Int64Callback is a function registered with a Meter that makes
+// observations for the Int64Observable instrument it is registered with.
+// Calls to the Int64Observer record measurement values for the
+// Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks; it should not report measurements with the same
+// attributes as another Int64Callback registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObserverConfig contains options for Asynchronous instruments that
+// observe int64 values.
+type Int64ObserverConfig struct {
+	description string
+	unit        unit.Unit
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObserverConfig returns a new Int64ObserverConfig with all opts
+// applied.
+func NewInt64ObserverConfig(opts ...Int64ObserverOption) Int64ObserverConfig {
+	var config Int64ObserverConfig
+	for _, o := range opts {
+		config = o.applyInt64Observer(config)
+	}
+	return config
+}
+
+// Description returns the Config description.
+func (c Int64ObserverConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the Config unit.
+func (c Int64ObserverConfig) Unit() unit.Unit {
+	return c.unit
+}
+
+// Callbacks returns the Config callbacks.
+func (c Int64ObserverConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObserverOption applies options to int64 Observer instruments.
+type Int64ObserverOption interface {
+	applyInt64Observer(Int64ObserverConfig) Int64ObserverConfig
+}
+
+type int64ObserverOptionFunc func(Int64ObserverConfig) Int64ObserverConfig
+
+func (fn int64ObserverOptionFunc) applyInt64Observer(cfg Int64ObserverConfig) Int64ObserverConfig {
+	return fn(cfg)
+}
+
+// WithInt64Callback adds a callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObserverOption {
+	return int64ObserverOptionFunc(func(cfg Int64ObserverConfig) Int64ObserverConfig {
+		cfg.callbacks = append(cfg.callbacks, callback)
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
new file mode 100644
index 000000000..c583be6fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import "go.opentelemetry.io/otel/metric/unit"
+
+// Asynchronous instruments are instruments that are updated within a Callback.
+// If an instrument is observed outside of its callback, it should be an error.
+//
+// This interface is used as a grouping mechanism.
+type Asynchronous interface {
+	asynchronous()
+}
+
+// Synchronous instruments are updated in line with application code.
+//
+// This interface is used as a grouping mechanism.
+type Synchronous interface {
+	synchronous()
+}
+
+// Option applies options to all instruments.
+type Option interface {
+	Float64ObserverOption
+	Int64ObserverOption
+	Float64Option
+	Int64Option
+}
+
+type descOpt string
+
+func (o descOpt) applyFloat64(c Float64Config) Float64Config {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64(c Int64Config) Int64Config {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig {
+	c.description = string(o)
+	return c
+}
+
+func (o descOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig {
+	c.description = string(o)
+	return c
+}
+
+// WithDescription sets the instrument description.
+func WithDescription(desc string) Option { return descOpt(desc) }
+
+type unitOpt unit.Unit
+
+func (o unitOpt) applyFloat64(c Float64Config) Float64Config {
+	c.unit = unit.Unit(o)
+	return c
+}
+
+func (o unitOpt) applyInt64(c Int64Config) Int64Config {
+	c.unit = unit.Unit(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig {
+	c.unit = unit.Unit(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig {
+	c.unit = unit.Unit(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+func WithUnit(u unit.Unit) Option { return unitOpt(u) }
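+
+// An illustrative sketch of the shared options, which apply to both the
+// synchronous and observable instrument configs:
+//
+//	opts := []instrument.Option{
+//		instrument.WithDescription("depth of the work queue"),
+//		instrument.WithUnit(unit.Dimensionless),
+//	}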
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go
new file mode 100644
index 000000000..d8f6ba9f4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/unit"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Counter interface {
+	// Add records a change to the counter.
+	Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
+
+	Synchronous
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64UpDownCounter interface {
+	// Add records a change to the counter.
+	Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
+
+	Synchronous
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Float64Histogram interface {
+	// Record adds an additional value to the distribution.
+	Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
+
+	Synchronous
+}
+
+// Float64Config contains options for Synchronous instruments that record
+// float64 values.
+type Float64Config struct {
+	description string
+	unit        unit.Unit
+}
+
+// NewFloat64Config returns a new Float64Config with all opts
+// applied.
+func NewFloat64Config(opts ...Float64Option) Float64Config {
+	var config Float64Config
+	for _, o := range opts {
+		config = o.applyFloat64(config)
+	}
+	return config
+}
+
+// Description returns the Config description.
+func (c Float64Config) Description() string {
+	return c.description
+}
+
+// Unit returns the Config unit.
+func (c Float64Config) Unit() unit.Unit {
+	return c.unit
+}
+
+// Float64Option applies options to synchronous float64 instruments.
+type Float64Option interface {
+	applyFloat64(Float64Config) Float64Config
+}
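+
+// An illustrative usage sketch (meter stands in for a metric.Meter obtained
+// from a provider; the instrument name and attribute are arbitrary):
+//
+//	hist, _ := meter.Float64Histogram("request.duration",
+//		instrument.WithUnit(unit.Milliseconds))
+//	hist.Record(ctx, 12.5, attribute.String("route", "/users"))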
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go
new file mode 100644
index 000000000..96bf730e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrument // import "go.opentelemetry.io/otel/metric/instrument"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/unit"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Counter interface {
+	// Add records a change to the counter.
+	Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
+
+	Synchronous
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64UpDownCounter interface {
+	// Add records a change to the counter.
+	Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
+
+	Synchronous
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Int64Histogram interface {
+	// Record adds an additional value to the distribution.
+	Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
+
+	Synchronous
+}
+
+// Int64Config contains options for Synchronous instruments that record int64
+// values.
+type Int64Config struct {
+	description string
+	unit        unit.Unit
+}
+
+// NewInt64Config returns a new Int64Config with all opts
+// applied.
+func NewInt64Config(opts ...Int64Option) Int64Config {
+	var config Int64Config
+	for _, o := range opts {
+		config = o.applyInt64(config)
+	}
+	return config
+}
+
+// Description returns the Config description.
+func (c Int64Config) Description() string {
+	return c.description
+}
+
+// Unit returns the Config unit.
+func (c Int64Config) Unit() unit.Unit {
+	return c.unit
+}
+
+// Int64Option applies options to synchronous int64 instruments.
+type Int64Option interface {
+	applyInt64(Int64Config) Int64Config
+}
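+
+// An illustrative usage sketch (meter stands in for a metric.Meter obtained
+// from a provider; the instrument name and attribute are arbitrary):
+//
+//	ctr, _ := meter.Int64Counter("requests.total")
+//	ctr.Add(ctx, 1, attribute.String("route", "/users"))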
diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
new file mode 100644
index 000000000..d1480fa5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go
@@ -0,0 +1,355 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/metric/internal/global"
+
+import (
+	"context"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/instrument"
+)
+
+// unwrapper unwraps to return the underlying instrument implementation.
+type unwrapper interface {
+	Unwrap() instrument.Asynchronous
+}
+
+type afCounter struct {
+	instrument.Float64Observable
+
+	name string
+	opts []instrument.Float64ObserverOption
+
+	delegate atomic.Value //instrument.Float64ObservableCounter
+}
+
+var _ unwrapper = (*afCounter)(nil)
+var _ instrument.Float64ObservableCounter = (*afCounter)(nil)
+
+func (i *afCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *afCounter) Unwrap() instrument.Asynchronous {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(instrument.Float64ObservableCounter)
+	}
+	return nil
+}
+
+type afUpDownCounter struct {
+	instrument.Float64Observable
+
+	name string
+	opts []instrument.Float64ObserverOption
+
+	delegate atomic.Value //instrument.Float64ObservableUpDownCounter
+}
+
+var _ unwrapper = (*afUpDownCounter)(nil)
+var _ instrument.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+
+func (i *afUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *afUpDownCounter) Unwrap() instrument.Asynchronous {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(instrument.Float64ObservableUpDownCounter)
+	}
+	return nil
+}
+
+type afGauge struct {
+	instrument.Float64Observable
+
+	name string
+	opts []instrument.Float64ObserverOption
+
+	delegate atomic.Value //instrument.Float64ObservableGauge
+}
+
+var _ unwrapper = (*afGauge)(nil)
+var _ instrument.Float64ObservableGauge = (*afGauge)(nil)
+
+func (i *afGauge) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *afGauge) Unwrap() instrument.Asynchronous {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(instrument.Float64ObservableGauge)
+	}
+	return nil
+}
+
+type aiCounter struct {
+	instrument.Int64Observable
+
+	name string
+	opts []instrument.Int64ObserverOption
+
+	delegate atomic.Value //instrument.Int64ObservableCounter
+}
+
+var _ unwrapper = (*aiCounter)(nil)
+var _ instrument.Int64ObservableCounter = (*aiCounter)(nil)
+
+func (i *aiCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *aiCounter) Unwrap() instrument.Asynchronous {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(instrument.Int64ObservableCounter)
+	}
+	return nil
+}
+
+type aiUpDownCounter struct {
+	instrument.Int64Observable
+
+	name string
+	opts []instrument.Int64ObserverOption
+
+	delegate atomic.Value //instrument.Int64ObservableUpDownCounter
+}
+
+var _ unwrapper = (*aiUpDownCounter)(nil)
+var _ instrument.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+
+func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *aiUpDownCounter) Unwrap() instrument.Asynchronous {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(instrument.Int64ObservableUpDownCounter)
+	}
+	return nil
+}
+
+type aiGauge struct {
+	instrument.Int64Observable
+
+	name string
+	opts []instrument.Int64ObserverOption
+
+	delegate atomic.Value //instrument.Int64ObservableGauge
+}
+
+var _ unwrapper = (*aiGauge)(nil)
+var _ instrument.Int64ObservableGauge = (*aiGauge)(nil)
+
+func (i *aiGauge) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *aiGauge) Unwrap() instrument.Asynchronous {
+	if ctr := i.delegate.Load(); ctr != nil {
+		return ctr.(instrument.Int64ObservableGauge)
+	}
+	return nil
+}
+
+// Sync Instruments.
+type sfCounter struct {
+	name string
+	opts []instrument.Float64Option
+
+	delegate atomic.Value //instrument.Float64Counter
+
+	instrument.Synchronous
+}
+
+var _ instrument.Float64Counter = (*sfCounter)(nil)
+
+func (i *sfCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64Counter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(instrument.Float64Counter).Add(ctx, incr, attrs...)
+	}
+}
+
+type sfUpDownCounter struct {
+	name string
+	opts []instrument.Float64Option
+
+	delegate atomic.Value //instrument.Float64UpDownCounter
+
+	instrument.Synchronous
+}
+
+var _ instrument.Float64UpDownCounter = (*sfUpDownCounter)(nil)
+
+func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(instrument.Float64UpDownCounter).Add(ctx, incr, attrs...)
+	}
+}
+
+type sfHistogram struct {
+	name string
+	opts []instrument.Float64Option
+
+	delegate atomic.Value //instrument.Float64Histogram
+
+	instrument.Synchronous
+}
+
+var _ instrument.Float64Histogram = (*sfHistogram)(nil)
+
+func (i *sfHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Float64Histogram(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(instrument.Float64Histogram).Record(ctx, x, attrs...)
+	}
+}
+
+type siCounter struct {
+	name string
+	opts []instrument.Int64Option
+
+	delegate atomic.Value //instrument.Int64Counter
+
+	instrument.Synchronous
+}
+
+var _ instrument.Int64Counter = (*siCounter)(nil)
+
+func (i *siCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Counter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(instrument.Int64Counter).Add(ctx, x, attrs...)
+	}
+}
+
+type siUpDownCounter struct {
+	name string
+	opts []instrument.Int64Option
+
+	delegate atomic.Value //instrument.Int64UpDownCounter
+
+	instrument.Synchronous
+}
+
+var _ instrument.Int64UpDownCounter = (*siUpDownCounter)(nil)
+
+func (i *siUpDownCounter) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(instrument.Int64UpDownCounter).Add(ctx, x, attrs...)
+	}
+}
+
+type siHistogram struct {
+	name string
+	opts []instrument.Int64Option
+
+	delegate atomic.Value //instrument.Int64Histogram
+
+	instrument.Synchronous
+}
+
+var _ instrument.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Histogram(i.name, i.opts...)
+	if err != nil {
+		otel.Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(instrument.Int64Histogram).Record(ctx, x, attrs...)
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
new file mode 100644
index 000000000..8acf63286
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go
@@ -0,0 +1,354 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/metric/internal/global"
+
+import (
+	"container/list"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/instrument"
+)
+
+// meterProvider is a placeholder for a configured SDK MeterProvider.
+//
+// All MeterProvider functionality is forwarded to a delegate once
+// configured.
+type meterProvider struct {
+	mtx    sync.Mutex
+	meters map[il]*meter
+
+	delegate metric.MeterProvider
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// setDelegate configures p to delegate all MeterProvider functionality to
+// provider.
+//
+// All Meters provided prior to this function call are switched out to be
+// Meters provided by provider. All instruments and callbacks are recreated and
+// delegated.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.meters) == 0 {
+		return
+	}
+
+	for _, meter := range p.meters {
+		meter.setDelegate(provider)
+	}
+
+	p.meters = nil
+}
+
+// Meter implements MeterProvider.
+func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Meter(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the meter in the meters map.
+
+	c := metric.NewMeterConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.meters == nil {
+		p.meters = make(map[il]*meter)
+	}
+
+	if val, ok := p.meters[key]; ok {
+		return val
+	}
+
+	t := &meter{name: name, opts: opts}
+	p.meters[key] = t
+	return t
+}
+
+// meter is a placeholder for a metric.Meter.
+//
+// All Meter functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopMeter.
+type meter struct {
+	name string
+	opts []metric.MeterOption
+
+	mtx         sync.Mutex
+	instruments []delegatedInstrument
+
+	registry list.List
+
+	delegate atomic.Value // metric.Meter
+}
+
+type delegatedInstrument interface {
+	setDelegate(metric.Meter)
+}
+
+// setDelegate configures m to delegate all Meter functionality to Meters
+// created by provider.
+//
+// All subsequent calls to the Meter methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (m *meter) setDelegate(provider metric.MeterProvider) {
+	meter := provider.Meter(m.name, m.opts...)
+	m.delegate.Store(meter)
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for _, inst := range m.instruments {
+		inst.setDelegate(meter)
+	}
+
+	for e := m.registry.Front(); e != nil; e = e.Next() {
+		r := e.Value.(*registration)
+		r.setDelegate(meter)
+		m.registry.Remove(e)
+	}
+
+	m.instruments = nil
+	m.registry.Init()
+}
+
+func (m *meter) Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64Counter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &siCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64UpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &siUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64Histogram(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &siHistogram{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64ObservableCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &aiCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64ObservableUpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &aiUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Int64ObservableGauge(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &aiGauge{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64Counter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &sfCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64UpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &sfUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64Histogram(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &sfHistogram{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64ObservableCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &afCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64ObservableUpDownCounter(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &afUpDownCounter{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+func (m *meter) Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		return del.Float64ObservableGauge(name, options...)
+	}
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	i := &afGauge{name: name, opts: options}
+	m.instruments = append(m.instruments, i)
+	return i, nil
+}
+
+// RegisterCallback captures the function that will be called during Collect.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...instrument.Asynchronous) (metric.Registration, error) {
+	if del, ok := m.delegate.Load().(metric.Meter); ok {
+		insts = unwrapInstruments(insts)
+		return del.RegisterCallback(f, insts...)
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	reg := &registration{instruments: insts, function: f}
+	e := m.registry.PushBack(reg)
+	reg.unreg = func() error {
+		m.mtx.Lock()
+		_ = m.registry.Remove(e)
+		m.mtx.Unlock()
+		return nil
+	}
+	return reg, nil
+}
+
+type wrapped interface {
+	unwrap() instrument.Asynchronous
+}
+
+func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous {
+	out := make([]instrument.Asynchronous, 0, len(instruments))
+
+	for _, inst := range instruments {
+		if in, ok := inst.(wrapped); ok {
+			out = append(out, in.unwrap())
+		} else {
+			out = append(out, inst)
+		}
+	}
+
+	return out
+}
+
+type registration struct {
+	instruments []instrument.Asynchronous
+	function    metric.Callback
+
+	unreg   func() error
+	unregMu sync.Mutex
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+	insts := unwrapInstruments(c.instruments)
+
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+
+	if c.unreg == nil {
+		// Unregister already called.
+		return
+	}
+
+	reg, err := m.RegisterCallback(c.function, insts...)
+	if err != nil {
+		otel.Handle(err)
+		// reg is not valid when err is non-nil; keep the previous unreg.
+		return
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
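
Every method above follows the same delegation pattern: before an SDK meter is installed, a placeholder instrument is appended to m.instruments so it can be re-pointed later; afterwards every call forwards straight to the delegate. A minimal sketch of that pattern, using a simplified counter interface rather than the real instrument types:

package sketch

import (
	"context"
	"sync/atomic"
)

// counter stands in for a real SDK instrument in this sketch.
type counter interface {
	Add(ctx context.Context, incr int64)
}

// swappableCounter is a no-op until a delegate is installed, mirroring
// the placeholder instruments the global meter hands out.
type swappableCounter struct {
	delegate atomic.Value // holds a counter once setDelegate runs
}

func (c *swappableCounter) Add(ctx context.Context, incr int64) {
	if del, ok := c.delegate.Load().(counter); ok {
		del.Add(ctx, incr) // SDK installed: record for real
		return
	}
	// No SDK yet: the measurement is silently dropped, matching the
	// behavior of the global API before registration.
}

func (c *swappableCounter) setDelegate(d counter) { c.delegate.Store(d) }
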
diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
new file mode 100644
index 000000000..47c0d787d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
@@ -0,0 +1,68 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/metric/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+var (
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateMeterOnce sync.Once
+)
+
+type meterProviderHolder struct {
+	mp metric.MeterProvider
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	current := MeterProvider()
+	if _, cOk := current.(*meterProvider); cOk {
+		if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+			// Do not assign the default delegating MeterProvider to delegate
+			// to itself.
+			global.Error(
+				errors.New("no delegate configured in meter provider"),
+				"Setting meter provider to its current value. No delegate will be configured",
+			)
+			return
+		}
+	}
+
+	delegateMeterOnce.Do(func() {
+		if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultMeterProvider() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: &meterProvider{}})
+	return v
+}
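
In application code these internals are reached through the module's public wrappers. A hedged setup sketch, assuming the metric/global package that re-exports MeterProvider and SetMeterProvider in this version of the API, plus the separate go.opentelemetry.io/otel/sdk/metric module (which is not part of this patch):

package main

import (
	"go.opentelemetry.io/otel/metric/global"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Install the SDK provider. The default delegating provider created
	// above hands over all pre-registered meters exactly once. (Passing
	// the default delegating provider back into SetMeterProvider would
	// trip the self-delegation guard and log an error instead.)
	global.SetMeterProvider(sdkmetric.NewMeterProvider())

	// Meters obtained before or after this point record via the SDK.
	meter := global.MeterProvider().Meter("example.com/mylib")
	_ = meter
}
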
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 000000000..f1e917e93
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,138 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/instrument"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or library.
+//
+// Warning: methods may be added to this interface in minor releases.
+type MeterProvider interface {
+	// Meter creates an instance of a `Meter` interface. The instrumentationName
+	// must be the name of the library providing instrumentation. This name may
+	// be the same as the instrumented code only if that code provides built-in
+	// instrumentation. If the instrumentationName is empty, an
+	// implementation-defined default name will be used instead.
+	Meter(instrumentationName string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Meter interface {
+	// Int64Counter returns a new instrument identified by name and configured
+	// with options. The instrument is used to synchronously record increasing
+	// int64 measurements during a computational operation.
+	Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error)
+	// Int64UpDownCounter returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// int64 measurements during a computational operation.
+	Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error)
+	// Int64Histogram returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// the distribution of int64 measurements during a computational operation.
+	Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error)
+	// Int64ObservableCounter returns a new instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// increasing int64 measurements once per measurement collection cycle.
+	Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error)
+	// Int64ObservableUpDownCounter returns a new instrument identified by name
+	// and configured with options. The instrument is used to asynchronously
+	// record int64 measurements once per measurement collection cycle.
+	Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error)
+	// Int64ObservableGauge returns a new instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// instantaneous int64 measurements once per measurement collection
+	// cycle.
+	Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error)
+
+	// Float64Counter returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// increasing float64 measurements during a computational operation.
+	Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error)
+	// Float64UpDownCounter returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// float64 measurements during a computational operation.
+	Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error)
+	// Float64Histogram returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// the distribution of float64 measurements during a computational
+	// operation.
+	Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error)
+	// Float64ObservableCounter returns a new instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// increasing float64 measurements once per measurement collection cycle.
+	Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new instrument identified by
+	// name and configured with options. The instrument is used to
+	// asynchronously record float64 measurements once per measurement
+	// collection cycle.
+	Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// instantaneous float64 measurements once per measurement collection
+	// cycle.
+	Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	RegisterCallback(f Callback, instruments ...instrument.Asynchronous) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+type Observer interface {
+	// ObserveFloat64 records the float64 value with attributes for obsrv.
+	ObserveFloat64(obsrv instrument.Float64Observable, value float64, attributes ...attribute.KeyValue)
+	// ObserveInt64 records the int64 value with attributes for obsrv.
+	ObserveInt64(obsrv instrument.Int64Observable, value int64, attributes ...attribute.KeyValue)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+type Registration interface {
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
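
To make the RegisterCallback contract concrete, here is a hedged sketch that registers a single observable gauge; the queue type is hypothetical and the Meter is assumed to come from whatever MeterProvider the application configured:

package sketch

import (
	"context"

	"go.opentelemetry.io/otel/metric"
)

// queue is a hypothetical instrumented data structure.
type queue struct{ items []any }

func (q *queue) depth() int64 { return int64(len(q.items)) }

// registerQueueDepth reports q's depth once per collection cycle.
func registerQueueDepth(meter metric.Meter, q *queue) (metric.Registration, error) {
	gauge, err := meter.Int64ObservableGauge("queue.depth")
	if err != nil {
		return nil, err
	}
	// The callback may only observe instruments it was registered with,
	// here just gauge. Calling Unregister on the returned Registration
	// stops future invocations.
	return meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, q.depth())
		return nil
	}, gauge)
}
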
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go
new file mode 100644
index 000000000..c586627ae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop.go
@@ -0,0 +1,198 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/instrument"
+)
+
+// NewNoopMeterProvider creates a MeterProvider that does not record any metrics.
+func NewNoopMeterProvider() MeterProvider {
+	return noopMeterProvider{}
+}
+
+type noopMeterProvider struct{}
+
+func (noopMeterProvider) Meter(string, ...MeterOption) Meter {
+	return noopMeter{}
+}
+
+// NewNoopMeter creates a Meter that does not record any metrics.
+func NewNoopMeter() Meter {
+	return noopMeter{}
+}
+
+type noopMeter struct{}
+
+func (noopMeter) Int64Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) {
+	return nonrecordingSyncInt64Instrument{}, nil
+}
+
+func (noopMeter) Int64UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) {
+	return nonrecordingSyncInt64Instrument{}, nil
+}
+
+func (noopMeter) Int64Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) {
+	return nonrecordingSyncInt64Instrument{}, nil
+}
+
+func (noopMeter) Int64ObservableCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) {
+	return nonrecordingAsyncInt64Instrument{}, nil
+}
+
+func (noopMeter) Int64ObservableUpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) {
+	return nonrecordingAsyncInt64Instrument{}, nil
+}
+
+func (noopMeter) Int64ObservableGauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) {
+	return nonrecordingAsyncInt64Instrument{}, nil
+}
+
+func (noopMeter) Float64Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) {
+	return nonrecordingSyncFloat64Instrument{}, nil
+}
+
+func (noopMeter) Float64UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) {
+	return nonrecordingSyncFloat64Instrument{}, nil
+}
+
+func (noopMeter) Float64Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) {
+	return nonrecordingSyncFloat64Instrument{}, nil
+}
+
+func (noopMeter) Float64ObservableCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) {
+	return nonrecordingAsyncFloat64Instrument{}, nil
+}
+
+func (noopMeter) Float64ObservableUpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) {
+	return nonrecordingAsyncFloat64Instrument{}, nil
+}
+
+func (noopMeter) Float64ObservableGauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) {
+	return nonrecordingAsyncFloat64Instrument{}, nil
+}
+
+// RegisterCallback returns a Registration that does not record any metrics.
+func (noopMeter) RegisterCallback(Callback, ...instrument.Asynchronous) (Registration, error) {
+	return noopReg{}, nil
+}
+
+type noopReg struct{}
+
+func (noopReg) Unregister() error { return nil }
+
+type nonrecordingAsyncFloat64Instrument struct {
+	instrument.Float64Observable
+}
+
+var (
+	_ instrument.Float64ObservableCounter       = nonrecordingAsyncFloat64Instrument{}
+	_ instrument.Float64ObservableUpDownCounter = nonrecordingAsyncFloat64Instrument{}
+	_ instrument.Float64ObservableGauge         = nonrecordingAsyncFloat64Instrument{}
+)
+
+func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) {
+	return n, nil
+}
+
+func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) {
+	return n, nil
+}
+
+func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) {
+	return n, nil
+}
+
+type nonrecordingAsyncInt64Instrument struct {
+	instrument.Int64Observable
+}
+
+var (
+	_ instrument.Int64ObservableCounter       = nonrecordingAsyncInt64Instrument{}
+	_ instrument.Int64ObservableUpDownCounter = nonrecordingAsyncInt64Instrument{}
+	_ instrument.Int64ObservableGauge         = nonrecordingAsyncInt64Instrument{}
+)
+
+func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) {
+	return n, nil
+}
+
+func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) {
+	return n, nil
+}
+
+func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) {
+	return n, nil
+}
+
+type nonrecordingSyncFloat64Instrument struct {
+	instrument.Synchronous
+}
+
+var (
+	_ instrument.Float64Counter       = nonrecordingSyncFloat64Instrument{}
+	_ instrument.Float64UpDownCounter = nonrecordingSyncFloat64Instrument{}
+	_ instrument.Float64Histogram     = nonrecordingSyncFloat64Instrument{}
+)
+
+func (n nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) {
+	return n, nil
+}
+
+func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) {
+	return n, nil
+}
+
+func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) {
+	return n, nil
+}
+
+func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {
+}
+
+func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {
+}
+
+type nonrecordingSyncInt64Instrument struct {
+	instrument.Synchronous
+}
+
+var (
+	_ instrument.Int64Counter       = nonrecordingSyncInt64Instrument{}
+	_ instrument.Int64UpDownCounter = nonrecordingSyncInt64Instrument{}
+	_ instrument.Int64Histogram     = nonrecordingSyncInt64Instrument{}
+)
+
+func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) {
+	return n, nil
+}
+
+func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) {
+	return n, nil
+}
+
+func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) {
+	return n, nil
+}
+
+func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {
+}
+func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {
+}
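
The no-op implementations are useful as safe defaults. A short sketch with a hypothetical library type that accepts an optional Meter:

package sketch

import "go.opentelemetry.io/otel/metric"

// client is a hypothetical instrumented type.
type client struct {
	meter metric.Meter
}

// newClient falls back to the no-op meter so instrumentation call sites
// never need nil checks; every instrument created from it silently
// discards its measurements.
func newClient(m metric.Meter) *client {
	if m == nil {
		m = metric.NewNoopMeter()
	}
	return &client{meter: m}
}
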
diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/go.opentelemetry.io/otel/metric/unit/doc.go
new file mode 100644
index 000000000..f8e723593
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/unit/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package unit provides units.
+//
+// This package is currently in a pre-GA phase. Backwards incompatible changes
+// may be introduced in subsequent minor version releases as we work to track
+// the evolving OpenTelemetry specification and user feedback.
+package unit // import "go.opentelemetry.io/otel/metric/unit"
diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go
new file mode 100644
index 000000000..647d77302
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go
@@ -0,0 +1,25 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package unit // import "go.opentelemetry.io/otel/metric/unit"
+
+// Unit is a determinate standard quantity of measurement.
+type Unit string
+
+// Units defined by OpenTelemetry.
+const (
+	Dimensionless Unit = "1"
+	Bytes         Unit = "By"
+	Milliseconds  Unit = "ms"
+)
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 000000000..d29aaa32c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 000000000..303cdf1cb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
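
A round-trip sketch of the Baggage propagator, using the MapCarrier defined later in this package; the member key and value are illustrative:

package sketch

import (
	"context"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func baggageRoundTrip(ctx context.Context) (context.Context, error) {
	// Attach a baggage member to the outgoing context.
	bag, err := baggage.Parse("user.id=42")
	if err != nil {
		return ctx, err
	}
	ctx = baggage.ContextWithBaggage(ctx, bag)

	// Inject writes the single "baggage" header into the carrier...
	carrier := propagation.MapCarrier{}
	propagation.Baggage{}.Inject(ctx, carrier)

	// ...and Extract on the receiving side restores it into a context.
	return propagation.Baggage{}.Extract(context.Background(), carrier), nil
}
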
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..c119eb285
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..c94438f73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
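
Typical setup code composes the two propagators defined in this package and installs the result globally through the otel wrapper vendored earlier in this patch:

package sketch

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func installPropagators() {
	// Inject and Extract run in the order given; Fields reports the
	// de-duplicated union: traceparent, tracestate, and baggage.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))
}
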
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..902692da0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"regexp"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/).
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var _ TextMapPropagator = TraceContext{}
+var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+
+// Inject sets tracecontext from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	h := fmt.Sprintf("%.2x-%s-%s-%s",
+		supportedVersion,
+		sc.TraceID(),
+		sc.SpanID(),
+		flags)
+	carrier.Set(traceparentHeader, h)
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	matches := traceCtxRegExp.FindStringSubmatch(h)
+
+	if len(matches) == 0 {
+		return trace.SpanContext{}
+	}
+
+	if len(matches) < 5 { // four subgroups plus the overall match
+		return trace.SpanContext{}
+	}
+
+	if len(matches[1]) != 2 {
+		return trace.SpanContext{}
+	}
+	ver, err := hex.DecodeString(matches[1])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
+		return trace.SpanContext{}
+	}
+
+	if len(matches[2]) != 32 {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+
+	scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+
+	if len(matches[3]) != 16 {
+		return trace.SpanContext{}
+	}
+	scc.SpanID, err = trace.SpanIDFromHex(matches[3])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+
+	if len(matches[4]) != 2 {
+		return trace.SpanContext{}
+	}
+	opts, err := hex.DecodeString(matches[4])
+	if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+		return trace.SpanContext{}
+	}
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
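
On the receiving side, HeaderCarrier (defined earlier in this package) lets TraceContext read traceparent and tracestate straight off an incoming request. A minimal middleware sketch:

package sketch

import (
	"net/http"

	"go.opentelemetry.io/otel/propagation"
)

func withTraceContext(next http.Handler) http.Handler {
	tc := propagation.TraceContext{}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Extract returns r.Context() unchanged when the headers are
		// missing or malformed, so this is safe on every request.
		ctx := tc.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
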
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
new file mode 100644
index 000000000..6e923acab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package instrumentation provides types to represent the code libraries that
+// provide OpenTelemetry instrumentation. These types are used in the
+// OpenTelemetry signal pipelines to identify the source of telemetry.
+//
+// See
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
+// and
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
+// for more information.
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
new file mode 100644
index 000000000..39f025a17
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Library represents the instrumentation library.
+//
+// Deprecated: use Scope instead.
+type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
new file mode 100644
index 000000000..09c6d93f6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Scope represents the instrumentation scope.
+type Scope struct {
+	// Name is the name of the instrumentation scope. This should be the
+	// Go package name of that scope.
+	Name string
+	// Version is the version of the instrumentation scope.
+	Version string
+	// SchemaURL of the telemetry emitted by the scope.
+	SchemaURL string
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
new file mode 100644
index 000000000..5e94b8ae5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package env // import "go.opentelemetry.io/otel/sdk/internal/env"
+
+import (
+	"os"
+	"strconv"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+	// BatchSpanProcessorScheduleDelayKey is the delay interval in
+	// milliseconds between two consecutive exports (e.g. 5000).
+	BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
+	// BatchSpanProcessorExportTimeoutKey is the maximum allowed time in
+	// milliseconds to export data (e.g. 3000).
+	BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
+	// BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (e.g. 2048).
+	BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
+	// BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (e.g.
+	// 512). Note: it must be less than or equal to
+	// BatchSpanProcessorMaxQueueSizeKey.
+	BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
+
+	// AttributeValueLengthKey is the maximum allowed attribute value size.
+	AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+	// AttributeCountKey is the maximum allowed span attribute count.
+	AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
+
+	// SpanAttributeValueLengthKey is the maximum allowed attribute value size
+	// for a span.
+	SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+	// SpanAttributeCountKey is the maximum allowed span attribute count for a
+	// span.
+	SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
+
+	// SpanEventCountKey is the maximum allowed span event count.
+	SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
+
+	// SpanEventAttributeCountKey is the maximum allowed attribute per span
+	// event count.
+	SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
+
+	// SpanLinkCountKey is the maximum allowed span link count.
+	SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
+
+	// SpanLinkAttributeCountKey is the maximum allowed attribute per span
+	// link count.
+	SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+	for _, key := range keys {
+		value, ok := os.LookupEnv(key)
+		if !ok {
+			continue
+		}
+
+		intValue, err := strconv.Atoi(value)
+		if err != nil {
+			global.Info("Got invalid value, number value expected.", key, value)
+			return defaultValue
+		}
+
+		return intValue
+	}
+
+	return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key if
+// it exists and the value is an int. Otherwise, defaultValue is returned.
+func IntEnvOr(key string, defaultValue int) int {
+	value, ok := os.LookupEnv(key)
+	if !ok {
+		return defaultValue
+	}
+
+	intValue, err := strconv.Atoi(value)
+	if err != nil {
+		global.Info("Got invalid value, number value expected.", key, value)
+		return defaultValue
+	}
+
+	return intValue
+}
+
+// BatchSpanProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorScheduleDelay(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchSpanProcessorExportTimeout returns the environment variable value for
+// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorExportTimeout(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
+	return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
+}
+
+// SpanAttributeValueLength returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
+// returned or defaultValue if that is not set.
+func SpanAttributeValueLength(defaultValue int) int {
+	return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
+}
+
+// SpanAttributeCount returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
+// defaultValue if that is not set.
+func SpanAttributeCount(defaultValue int) int {
+	return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
+}
+
+// SpanEventCount returns the environment variable value for the
+// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanEventCount(defaultValue int) int {
+	return IntEnvOr(SpanEventCountKey, defaultValue)
+}
+
+// SpanEventAttributeCount returns the environment variable value for the
+// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
+// is returned.
+func SpanEventAttributeCount(defaultValue int) int {
+	return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
+}
+
+// SpanLinkCount returns the environment variable value for the
+// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkCount(defaultValue int) int {
+	return IntEnvOr(SpanLinkCountKey, defaultValue)
+}
+
+// SpanLinkAttributeCount returns the environment variable value for the
+// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkAttributeCount(defaultValue int) int {
+	return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
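For reference, a minimal standalone sketch (not part of the patch) of how these variables behave from an application's point of view; `intEnvOr` below is a hypothetical stand-in that mirrors the IntEnvOr helper above, except that it falls back silently instead of logging:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// intEnvOr mirrors IntEnvOr above: use the variable when it parses as an
// integer, otherwise return the default (silently, unlike the SDK helper).
func intEnvOr(key string, def int) int {
	if v, ok := os.LookupEnv(key); ok {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

func main() {
	os.Setenv("OTEL_BSP_MAX_QUEUE_SIZE", "4096")
	fmt.Println(intEnvOr("OTEL_BSP_MAX_QUEUE_SIZE", 2048)) // 4096
	fmt.Println(intEnvOr("OTEL_BSP_SCHEDULE_DELAY", 5000)) // 5000 (unset)
}
```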
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
new file mode 100644
index 000000000..84a02306e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/sdk/internal"
+
+import (
+	"fmt"
+	"time"
+
+	"go.opentelemetry.io/otel"
+)
+
+// UserAgent is the user agent to be added to the outgoing
+// requests from the exporters.
+var UserAgent = fmt.Sprintf("opentelemetry-go/%s", otel.Version())
+
+// MonotonicEndTime returns the current end time, computed as start plus the
+// monotonically measured duration elapsed since start.
+//
+// time.Since uses the monotonic clock reading stored in start, so adding the
+// elapsed duration back to start yields an end time that is consistent with
+// the monotonic clock.
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func MonotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
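A standalone sketch (not part of the patch) of why this works: time.Since reads the monotonic clock carried in start, so the computed end never precedes start even if the wall clock is stepped backwards in between.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now() // carries a monotonic clock reading
	time.Sleep(10 * time.Millisecond)

	// Same expression as MonotonicEndTime above.
	end := start.Add(time.Since(start))

	fmt.Println(end.Sub(start) >= 10*time.Millisecond) // true
}
```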
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
new file mode 100644
index 000000000..c1d220408
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -0,0 +1,72 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+)
+
+var (
+	// ErrPartialResource is returned by a detector when complete source
+	// information for a Resource is unavailable or the source information
+	// contains invalid values that are omitted from the returned Resource.
+	ErrPartialResource = errors.New("partial resource")
+)
+
+// Detector detects OpenTelemetry resource information.
+type Detector interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Detect returns an initialized Resource based on gathered information.
+	// If the source information to construct a Resource contains invalid
+	// values, a Resource is returned with the valid parts of the source
+	// information used for initialization along with an appropriately
+	// wrapped ErrPartialResource error.
+	Detect(ctx context.Context) (*Resource, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// Detect calls all input detectors sequentially and merges each result with
+// the previous one. It also returns an aggregated error combining the errors
+// reported by any detectors that failed.
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+	var autoDetectedRes *Resource
+	var errInfo []string
+	for _, detector := range detectors {
+		if detector == nil {
+			continue
+		}
+		res, err := detector.Detect(ctx)
+		if err != nil {
+			errInfo = append(errInfo, err.Error())
+			if !errors.Is(err, ErrPartialResource) {
+				continue
+			}
+		}
+		autoDetectedRes, err = Merge(autoDetectedRes, res)
+		if err != nil {
+			errInfo = append(errInfo, err.Error())
+		}
+	}
+
+	var aggregatedError error
+	if len(errInfo) > 0 {
+		aggregatedError = fmt.Errorf("detecting resources: %s", errInfo)
+	}
+	return autoDetectedRes, aggregatedError
+}
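A usage sketch (not part of the patch): nil detectors are skipped, results are merged left to right, and the attribute key below is made up for illustration.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// staticDetector is a hypothetical Detector used only for this sketch.
type staticDetector struct{}

func (staticDetector) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(attribute.String("example.team", "observability")), nil
}

func main() {
	// nil entries are skipped; each result is merged with the previous one.
	res, err := resource.Detect(context.Background(), nil, staticDetector{})
	fmt.Println(res, err)
}
```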
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 000000000..aa0f942f4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+type (
+	// telemetrySDK is a Detector that provides information about
+	// the OpenTelemetry SDK used. This Detector is included as a
+	// builtin. If these resource attributes are not wanted, omit
+	// the WithTelemetrySDK() option when building the Resource.
+	telemetrySDK struct{}
+
+	// host is a Detector that provides information about the host
+	// being run on. This Detector is included as a builtin. If
+	// these resource attributes are not wanted, omit the WithHost()
+	// option when building the Resource.
+	host struct{}
+
+	stringDetector struct {
+		schemaURL string
+		K         attribute.Key
+		F         func() (string, error)
+	}
+
+	defaultServiceNameDetector struct{}
+)
+
+var (
+	_ Detector = telemetrySDK{}
+	_ Detector = host{}
+	_ Detector = stringDetector{}
+	_ Detector = defaultServiceNameDetector{}
+)
+
+// Detect returns a *Resource that describes the OpenTelemetry SDK used.
+func (telemetrySDK) Detect(context.Context) (*Resource, error) {
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.TelemetrySDKName("opentelemetry"),
+		semconv.TelemetrySDKLanguageGo,
+		semconv.TelemetrySDKVersion(otel.Version()),
+	), nil
+}
+
+// Detect returns a *Resource that describes the host being run on.
+func (host) Detect(ctx context.Context) (*Resource, error) {
+	return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
+}
+
+// StringDetector returns a Detector that will produce a *Resource
+// containing the string as a value corresponding to k. The resulting Resource
+// will have the specified schemaURL.
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
+	return stringDetector{schemaURL: schemaURL, K: k, F: f}
+}
+
+// Detect returns a *Resource containing the value produced by F under the
+// key K, associated with the detector's schemaURL.
+func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+	value, err := sd.F()
+	if err != nil {
+		return nil, fmt.Errorf("%s: %w", string(sd.K), err)
+	}
+	a := sd.K.String(value)
+	if !a.Valid() {
+		return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
+	}
+	return NewWithAttributes(sd.schemaURL, a), nil
+}
+
+// Detect implements Detector.
+func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
+	return StringDetector(
+		semconv.SchemaURL,
+		semconv.ServiceNameKey,
+		func() (string, error) {
+			executable, err := os.Executable()
+			if err != nil {
+				return "unknown_service:go", nil
+			}
+			return "unknown_service:" + filepath.Base(executable), nil
+		},
+	).Detect(ctx)
+}
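A small usage sketch (not part of the patch) of the exported StringDetector constructor; the attribute key is fabricated for illustration:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	// Record the hostname under a made-up key, using the semconv schema URL.
	det := resource.StringDetector(semconv.SchemaURL,
		attribute.Key("example.node.name"), os.Hostname)

	res, err := det.Detect(context.Background())
	fmt.Println(res, err)
}
```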
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
new file mode 100644
index 000000000..f9a2a2999
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -0,0 +1,201 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// config contains configuration for Resource creation.
+type config struct {
+	// detectors that will be evaluated.
+	detectors []Detector
+	// SchemaURL to associate with the Resource.
+	schemaURL string
+}
+
+// Option is the interface that applies a configuration option.
+type Option interface {
+	// apply sets the Option value of a config.
+	apply(config) config
+}
+
+// WithAttributes adds attributes to the configured Resource.
+func WithAttributes(attributes ...attribute.KeyValue) Option {
+	return WithDetectors(detectAttributes{attributes})
+}
+
+type detectAttributes struct {
+	attributes []attribute.KeyValue
+}
+
+func (d detectAttributes) Detect(context.Context) (*Resource, error) {
+	return NewSchemaless(d.attributes...), nil
+}
+
+// WithDetectors adds detectors to be evaluated for the configured resource.
+func WithDetectors(detectors ...Detector) Option {
+	return detectorsOption{detectors: detectors}
+}
+
+type detectorsOption struct {
+	detectors []Detector
+}
+
+func (o detectorsOption) apply(cfg config) config {
+	cfg.detectors = append(cfg.detectors, o.detectors...)
+	return cfg
+}
+
+// WithFromEnv adds attributes from environment variables to the configured resource.
+func WithFromEnv() Option {
+	return WithDetectors(fromEnv{})
+}
+
+// WithHost adds attributes from the host to the configured resource.
+func WithHost() Option {
+	return WithDetectors(host{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+	return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+	return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+	cfg.schemaURL = string(o)
+	return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+	return WithDetectors(
+		osTypeDetector{},
+		osDescriptionDetector{},
+	)
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+	return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+	return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information, it will be included in the exported resource.
+//
+// This option is equivalent to calling WithProcessPID,
+// WithProcessExecutableName, WithProcessExecutablePath,
+// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
+// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
+// option function for information about what resource attributes each
+// includes.
+func WithProcess() Option {
+	return WithDetectors(
+		processPIDDetector{},
+		processExecutableNameDetector{},
+		processExecutablePathDetector{},
+		processCommandArgsDetector{},
+		processOwnerDetector{},
+		processRuntimeNameDetector{},
+		processRuntimeVersionDetector{},
+		processRuntimeDescriptionDetector{},
+	)
+}
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+	return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+	return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+	return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured
+// Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information, it will be included in the exported resource.
+func WithProcessCommandArgs() Option {
+	return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+	return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+	return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+	return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+	return WithDetectors(processRuntimeDescriptionDetector{})
+}
+
+// WithContainer adds all the Container attributes to the configured Resource.
+// See individual WithContainer* functions to configure specific attributes.
+func WithContainer() Option {
+	return WithDetectors(
+		cgroupContainerIDDetector{},
+	)
+}
+
+// WithContainerID adds an attribute with the id of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
+func WithContainerID() Option {
+	return WithDetectors(cgroupContainerIDDetector{})
+}
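Taken together, these options compose like so; a minimal sketch (not part of the patch) of building a Resource with a few of the detectors defined in this file:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_RESOURCE_ATTRIBUTES / OTEL_SERVICE_NAME
		resource.WithTelemetrySDK(), // SDK name, language, version
		resource.WithHost(),         // host.name
		resource.WithAttributes(attribute.String("service.name", "example")),
	)
	fmt.Println(res, err)
}
```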
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
new file mode 100644
index 000000000..318dcf82f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"io"
+	"os"
+	"regexp"
+
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+type containerIDProvider func() (string, error)
+
+var (
+	containerID         containerIDProvider = getContainerIDFromCGroup
+	cgroupContainerIDRe                     = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)
+)
+
+type cgroupContainerIDDetector struct{}
+
+const cgroupPath = "/proc/self/cgroup"
+
+// Detect returns a *Resource that describes the id of the container.
+// If no container id is found, an empty resource is returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	containerID, err := containerID()
+	if err != nil {
+		return nil, err
+	}
+
+	if containerID == "" {
+		return Empty(), nil
+	}
+	return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+	defaultOSStat = os.Stat
+	osStat        = defaultOSStat
+
+	defaultOSOpen = func(name string) (io.ReadCloser, error) {
+		return os.Open(name)
+	}
+	osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the id of the container from the cgroup file.
+// If no container id is found, an empty string is returned.
+func getContainerIDFromCGroup() (string, error) {
+	if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+		// File does not exist, skip
+		return "", nil
+	}
+
+	file, err := osOpen(cgroupPath)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the id of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if id := getContainerIDFromLine(line); id != "" {
+			return id
+		}
+	}
+	return ""
+}
+
+// getContainerIDFromLine returns the id of the container from one string line.
+func getContainerIDFromLine(line string) string {
+	matches := cgroupContainerIDRe.FindStringSubmatch(line)
+	if len(matches) <= 1 {
+		return ""
+	}
+	return matches[1]
+}
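A standalone sketch (not part of the patch) of the extraction rule, applied to a fabricated docker-style cgroup line:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same expression as cgroupContainerIDRe above.
var re = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`)

func main() {
	// Fabricated /proc/self/cgroup line; the hex id is not a real container.
	line := "13:name=systemd:/docker/d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356"

	if m := re.FindStringSubmatch(line); len(m) > 1 {
		fmt.Println(m[1]) // the trailing 64-hex container id
	}
}
```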
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 000000000..9aab3d839
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user-defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES, the WithFromEnv option can be used. It will
+// interpret the value as a list of comma-delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 000000000..e32843cad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+const (
+	// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+	resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES"
+
+	// svcNameKey is the environment variable name that Service Name information will be read from.
+	svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+var (
+	// errMissingValue is returned when a resource value is missing.
+	errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+)
+
+// fromEnv is a Detector that collects resource information from the
+// environment. This Detector is included as a builtin.
+type fromEnv struct{}
+
+// compile time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from environment.
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+	attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+	svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+	if attrs == "" && svcName == "" {
+		return Empty(), nil
+	}
+
+	var res *Resource
+
+	if svcName != "" {
+		res = NewSchemaless(semconv.ServiceName(svcName))
+	}
+
+	r2, err := constructOTResources(attrs)
+
+	// Ensure that the resource with the service name from OTEL_SERVICE_NAME
+	// takes precedence, if it was defined.
+	res, err2 := Merge(r2, res)
+
+	if err == nil {
+		err = err2
+	} else if err2 != nil {
+		err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+	}
+
+	return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+	if s == "" {
+		return Empty(), nil
+	}
+	pairs := strings.Split(s, ",")
+	attrs := []attribute.KeyValue{}
+	var invalid []string
+	for _, p := range pairs {
+		field := strings.SplitN(p, "=", 2)
+		if len(field) != 2 {
+			invalid = append(invalid, p)
+			continue
+		}
+		k := strings.TrimSpace(field[0])
+		v, err := url.QueryUnescape(strings.TrimSpace(field[1]))
+		if err != nil {
+			// Retain original value if decoding fails, otherwise it will be
+			// an empty string.
+			v = field[1]
+			otel.Handle(err)
+		}
+		attrs = append(attrs, attribute.String(k, v))
+	}
+	var err error
+	if len(invalid) > 0 {
+		err = fmt.Errorf("%w: %v", errMissingValue, invalid)
+	}
+	return NewSchemaless(attrs...), err
+}
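A usage sketch (not part of the patch) showing the environment contract implemented above; values may be URL-encoded, and OTEL_SERVICE_NAME wins over any service.name pair:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Comma-delimited key=value pairs; "%20" is unescaped to a space.
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "deployment.environment=prod,team=infra%20core")
	os.Setenv("OTEL_SERVICE_NAME", "example-svc") // overrides any service.name pair

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	fmt.Println(res, err)
}
```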
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
new file mode 100644
index 000000000..815fe5c20
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -0,0 +1,97 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+type osDescriptionProvider func() (string, error)
+
+var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
+
+var osDescription = defaultOSDescriptionProvider
+
+func setDefaultOSDescriptionProvider() {
+	setOSDescriptionProvider(defaultOSDescriptionProvider)
+}
+
+func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
+	osDescription = osDescriptionProvider
+}
+
+type osTypeDetector struct{}
+type osDescriptionDetector struct{}
+
+// Detect returns a *Resource that describes the operating system type the
+// service is running on.
+func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+	osType := runtimeOS()
+
+	osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		osTypeAttribute,
+	), nil
+}
+
+// Detect returns a *Resource that describes the operating system the
+// service is running on.
+func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+	description, err := osDescription()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.OSDescription(description),
+	), nil
+}
+
+// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
+// into an OS type attribute with the corresponding value defined by the semantic
+// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
+// and used as the value for the returned OS type attribute.
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+	// the elements in this map are the intersection between
+	// available GOOS values and defined semconv OS types
+	osTypeAttributeMap := map[string]attribute.KeyValue{
+		"darwin":    semconv.OSTypeDarwin,
+		"dragonfly": semconv.OSTypeDragonflyBSD,
+		"freebsd":   semconv.OSTypeFreeBSD,
+		"linux":     semconv.OSTypeLinux,
+		"netbsd":    semconv.OSTypeNetBSD,
+		"openbsd":   semconv.OSTypeOpenBSD,
+		"solaris":   semconv.OSTypeSolaris,
+		"windows":   semconv.OSTypeWindows,
+	}
+
+	var osTypeAttribute attribute.KeyValue
+
+	if attr, ok := osTypeAttributeMap[osType]; ok {
+		osTypeAttribute = attr
+	} else {
+		osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+	}
+
+	return osTypeAttribute
+}
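A compact sketch (not part of the patch) of the fallback rule: GOOS values without a semconv mapping are lowercased and used verbatim. `osTypeValue` is a hypothetical mirror of the logic above, with the mapped semconv values written out as plain strings for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// osTypeValue mirrors mapRuntimeOSToSemconvOSType, returning the os.type
// attribute value as a plain string.
func osTypeValue(goos string) string {
	known := map[string]string{
		"darwin": "darwin", "dragonfly": "dragonflybsd", "freebsd": "freebsd",
		"linux": "linux", "netbsd": "netbsd", "openbsd": "openbsd",
		"solaris": "solaris", "windows": "windows",
	}
	if v, ok := known[goos]; ok {
		return v
	}
	return strings.ToLower(goos)
}

func main() {
	fmt.Println(osTypeValue("linux")) // linux
	fmt.Println(osTypeValue("plan9")) // plan9 (unmapped, lowercased verbatim)
}
```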
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 000000000..24ec85793
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"os"
+)
+
+type plist struct {
+	XMLName xml.Name `xml:"plist"`
+	Dict    dict     `xml:"dict"`
+}
+
+type dict struct {
+	Key    []string `xml:"key"`
+	String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+	file, err := getPlistFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values, err := parsePlistFile(file)
+	if err != nil {
+		return ""
+	}
+
+	return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{
+		"/System/Library/CoreServices/SystemVersion.plist",
+		"/System/Library/CoreServices/ServerVersion.plist",
+	})
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-value pairs formed by each pair of correlated
+// <key> and <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+	var v plist
+
+	err := xml.NewDecoder(file).Decode(&v)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(v.Dict.Key) != len(v.Dict.String) {
+		return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+	}
+
+	properties := make(map[string]string, len(v.Dict.Key))
+	for i, key := range v.Dict.Key {
+		properties[key] = v.Dict.String[i]
+	}
+
+	return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If any of these properties is missing,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+	productName := properties["ProductName"]
+	productVersion := properties["ProductVersion"]
+	productBuildVersion := properties["ProductBuildVersion"]
+
+	if productName == "" || productVersion == "" || productBuildVersion == "" {
+		return ""
+	}
+
+	return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
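A self-contained sketch (not part of the patch) of the decoding approach: the dict's &lt;key&gt; and &lt;string&gt; children are collected positionally and then zipped into a map. The plist document is fabricated and trimmed for illustration.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// plistDoc has the same shape as the plist/dict types above.
type plistDoc struct {
	XMLName xml.Name `xml:"plist"`
	Dict    struct {
		Key    []string `xml:"key"`
		String []string `xml:"string"`
	} `xml:"dict"`
}

const sample = `<plist><dict>
<key>ProductName</key><string>macOS</string>
<key>ProductVersion</key><string>13.3</string>
<key>ProductBuildVersion</key><string>22E252</string>
</dict></plist>`

func main() {
	var v plistDoc
	if err := xml.NewDecoder(strings.NewReader(sample)).Decode(&v); err != nil {
		panic(err)
	}
	props := make(map[string]string, len(v.Dict.Key))
	for i, k := range v.Dict.Key {
		props[k] = v.Dict.String[i]
	}
	// Same format as buildOSRelease above.
	fmt.Printf("%s %s (%s)\n", props["ProductName"], props["ProductVersion"], props["ProductBuildVersion"])
}
```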
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 000000000..fba6790e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+	file, err := getOSReleaseFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values := parseOSReleaseFile(file)
+
+	return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release
+// file and returns a map with the key-value pairs contained in it. Empty lines,
+// lines starting with a '#' character, and lines missing the key=value
+// separator are ignored. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+	values := make(map[string]string)
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if skip(line) {
+			continue
+		}
+
+		key, value, ok := parse(line)
+		if ok {
+			values[key] = value
+		}
+	}
+
+	return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+	line = strings.TrimSpace(line)
+
+	return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+	parts := strings.SplitN(line, "=", 2)
+
+	if len(parts) != 2 || len(parts[0]) == 0 {
+		return "", "", false
+	}
+
+	key := strings.TrimSpace(parts[0])
+	value := unescape(unquote(strings.TrimSpace(parts[1])))
+
+	return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+	if len(s) < 2 {
+		return s
+	}
+
+	if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+		return s[1 : len(s)-1]
+	}
+
+	return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+	return strings.NewReplacer(
+		`\$`, `$`,
+		`\"`, `"`,
+		`\'`, `'`,
+		`\\`, `\`,
+		"\\`", "`",
+	).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and uses `PRETTY_NAME` alone if either of the former is missing. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+	var osRelease string
+
+	name := values["NAME"]
+	version := values["VERSION"]
+
+	if version == "" {
+		version = values["VERSION_ID"]
+	}
+
+	if name != "" && version != "" {
+		osRelease = fmt.Sprintf("%s %s", name, version)
+	} else {
+		osRelease = values["PRETTY_NAME"]
+	}
+
+	return osRelease
+}
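A standalone sketch (not part of the patch) running a fabricated os-release snippet through the same skip/parse/unquote rules; strings.Cut stands in for the SplitN-based parse above, and the quote trimming is a simplified unquote.

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

const sample = `# a comment line
NAME="Debian GNU/Linux"
VERSION="12 (bookworm)"

PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"`

func main() {
	values := map[string]string{}
	sc := bufio.NewScanner(strings.NewReader(sample))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip(), effectively
		}
		if k, v, ok := strings.Cut(line, "="); ok && k != "" {
			values[k] = strings.Trim(v, `"'`) // simplified unquote
		}
	}
	// buildOSRelease favors NAME + VERSION over PRETTY_NAME.
	fmt.Printf("%s %s\n", values["NAME"], values["VERSION"])
}
```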
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 000000000..1c84afc18
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,90 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+	setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+	currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+	uname, err := uname()
+	if err != nil {
+		return "", err
+	}
+
+	osRelease := osRelease()
+	if osRelease != "" {
+		return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+	}
+
+	return uname, nil
+}
+
+// uname issues a uname(2) system call (or equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+	var utsName unix.Utsname
+
+	err := currentUnameProvider(&utsName)
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%s %s %s %s %s",
+		unix.ByteSliceToString(utsName.Sysname[:]),
+		unix.ByteSliceToString(utsName.Nodename[:]),
+		unix.ByteSliceToString(utsName.Release[:]),
+		unix.ByteSliceToString(utsName.Version[:]),
+		unix.ByteSliceToString(utsName.Machine[:]),
+	), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+	for _, c := range candidates {
+		file, err := os.Open(c)
+		if err == nil {
+			return file, nil
+		}
+	}
+
+	return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 000000000..3ebcb534f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !aix
+// +build !darwin
+// +build !dragonfly
+// +build !freebsd
+// +build !linux
+// +build !netbsd
+// +build !openbsd
+// +build !solaris
+// +build !windows
+// +build !zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declaration at the top of
+// this file for a list of unsupported OSes.
+func platformOSDescription() (string, error) {
+	return "<unknown>", nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 000000000..faad64d8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,101 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"fmt"
+	"strconv"
+
+	"golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) {
+	k, err := registry.OpenKey(
+		registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+
+	if err != nil {
+		return "", err
+	}
+
+	defer k.Close()
+
+	var (
+		productName               = readProductName(k)
+		displayVersion            = readDisplayVersion(k)
+		releaseID                 = readReleaseID(k)
+		currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
+		currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
+		currentBuildNumber        = readCurrentBuildNumber(k)
+		ubr                       = readUBR(k)
+	)
+
+	if displayVersion != "" {
+		displayVersion += " "
+	}
+
+	return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
+		productName,
+		displayVersion,
+		releaseID,
+		currentMajorVersionNumber,
+		currentMinorVersionNumber,
+		currentBuildNumber,
+		ubr,
+	), nil
+}
+
+func getStringValue(name string, k registry.Key) string {
+	value, _, _ := k.GetStringValue(name)
+
+	return value
+}
+
+func getIntegerValue(name string, k registry.Key) uint64 {
+	value, _, _ := k.GetIntegerValue(name)
+
+	return value
+}
+
+func readProductName(k registry.Key) string {
+	return getStringValue("ProductName", k)
+}
+
+func readDisplayVersion(k registry.Key) string {
+	return getStringValue("DisplayVersion", k)
+}
+
+func readReleaseID(k registry.Key) string {
+	return getStringValue("ReleaseID", k)
+}
+
+func readCurrentMajorVersionNumber(k registry.Key) string {
+	return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
+}
+
+func readCurrentMinorVersionNumber(k registry.Key) string {
+	return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
+}
+
+func readCurrentBuildNumber(k registry.Key) string {
+	return getStringValue("CurrentBuildNumber", k)
+}
+
+func readUBR(k registry.Key) string {
+	return strconv.FormatUint(getIntegerValue("UBR", k), 10)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
new file mode 100644
index 000000000..bdd0e7fe6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -0,0 +1,180 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+type pidProvider func() int
+type executablePathProvider func() (string, error)
+type commandArgsProvider func() []string
+type ownerProvider func() (*user.User, error)
+type runtimeNameProvider func() string
+type runtimeVersionProvider func() string
+type runtimeOSProvider func() string
+type runtimeArchProvider func() string
+
+var (
+	defaultPidProvider            pidProvider            = os.Getpid
+	defaultExecutablePathProvider executablePathProvider = os.Executable
+	defaultCommandArgsProvider    commandArgsProvider    = func() []string { return os.Args }
+	defaultOwnerProvider          ownerProvider          = user.Current
+	defaultRuntimeNameProvider    runtimeNameProvider    = func() string {
+		if runtime.Compiler == "gc" {
+			return "go"
+		}
+		return runtime.Compiler
+	}
+	defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
+	defaultRuntimeOSProvider      runtimeOSProvider      = func() string { return runtime.GOOS }
+	defaultRuntimeArchProvider    runtimeArchProvider    = func() string { return runtime.GOARCH }
+)
+
+var (
+	pid            = defaultPidProvider
+	executablePath = defaultExecutablePathProvider
+	commandArgs    = defaultCommandArgsProvider
+	owner          = defaultOwnerProvider
+	runtimeName    = defaultRuntimeNameProvider
+	runtimeVersion = defaultRuntimeVersionProvider
+	runtimeOS      = defaultRuntimeOSProvider
+	runtimeArch    = defaultRuntimeArchProvider
+)
+
+func setDefaultOSProviders() {
+	setOSProviders(
+		defaultPidProvider,
+		defaultExecutablePathProvider,
+		defaultCommandArgsProvider,
+	)
+}
+
+func setOSProviders(
+	pidProvider pidProvider,
+	executablePathProvider executablePathProvider,
+	commandArgsProvider commandArgsProvider,
+) {
+	pid = pidProvider
+	executablePath = executablePathProvider
+	commandArgs = commandArgsProvider
+}
+
+func setDefaultRuntimeProviders() {
+	setRuntimeProviders(
+		defaultRuntimeNameProvider,
+		defaultRuntimeVersionProvider,
+		defaultRuntimeOSProvider,
+		defaultRuntimeArchProvider,
+	)
+}
+
+func setRuntimeProviders(
+	runtimeNameProvider runtimeNameProvider,
+	runtimeVersionProvider runtimeVersionProvider,
+	runtimeOSProvider runtimeOSProvider,
+	runtimeArchProvider runtimeArchProvider,
+) {
+	runtimeName = runtimeNameProvider
+	runtimeVersion = runtimeVersionProvider
+	runtimeOS = runtimeOSProvider
+	runtimeArch = runtimeArchProvider
+}
+
+func setDefaultUserProviders() {
+	setUserProviders(defaultOwnerProvider)
+}
+
+func setUserProviders(ownerProvider ownerProvider) {
+	owner = ownerProvider
+}
+
+type processPIDDetector struct{}
+type processExecutableNameDetector struct{}
+type processExecutablePathDetector struct{}
+type processCommandArgsDetector struct{}
+type processOwnerDetector struct{}
+type processRuntimeNameDetector struct{}
+type processRuntimeVersionDetector struct{}
+type processRuntimeDescriptionDetector struct{}
+
+// Detect returns a *Resource that describes the process identifier (PID) of the
+// executing process.
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
+}
+
+// Detect returns a *Resource that describes the name of the process executable.
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+	executableName := filepath.Base(commandArgs()[0])
+
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
+}
+
+// Detect returns a *Resource that describes the full path of the process executable.
+func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+	executablePath, err := executablePath()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
+}
+
+// Detect returns a *Resource that describes all the command arguments as received
+// by the process.
+func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
+}
+
+// Detect returns a *Resource that describes the username of the user that owns the
+// process.
+func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+	owner, err := owner()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
+}
+
+// Detect returns a *Resource that describes the name of the compiler used to compile
+// this process image.
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
+}
+
+// Detect returns a *Resource that describes the version of the runtime of this process.
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+	return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
+}
+
+// Detect returns a *Resource that describes the runtime of this process.
+func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+	runtimeDescription := fmt.Sprintf(
+		"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
+
+	return NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.ProcessRuntimeDescription(runtimeDescription),
+	), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
new file mode 100644
index 000000000..c425ff05d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -0,0 +1,282 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Resource describes an entity about which identifying information
+// and metadata is exposed.  Resource is an immutable object,
+// equivalent to a map from key to unique value.
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`).  The `nil` value is equivalent to an empty
+// Resource.
+type Resource struct {
+	attrs     attribute.Set
+	schemaURL string
+}
+
+var (
+	emptyResource       Resource
+	defaultResource     *Resource
+	defaultResourceOnce sync.Once
+)
+
+var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL")
+
+// New returns a Resource combined from the user-provided detectors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+	cfg := config{}
+	for _, opt := range opts {
+		cfg = opt.apply(cfg)
+	}
+
+	resource, err := Detect(ctx, cfg.detectors...)
+
+	var err2 error
+	resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL})
+	if err == nil {
+		err = err2
+	} else if err2 != nil {
+		err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+	}
+
+	return resource, err
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+	resource := NewSchemaless(attrs...)
+	resource.schemaURL = schemaURL
+	return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items, those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+	if len(attrs) == 0 {
+		return &emptyResource
+	}
+
+	// Ensure attributes comply with the specification:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes
+	s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+		return kv.Valid()
+	})
+
+	// If attrs only contains invalid entries, do not allocate a new resource.
+	if s.Len() == 0 {
+		return &emptyResource
+	}
+
+	return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources;
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+	if r == nil {
+		return ""
+	}
+	return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+func (r *Resource) MarshalLog() interface{} {
+	return struct {
+		Attributes attribute.Set
+		SchemaURL  string
+	}{
+		Attributes: r.attrs,
+		SchemaURL:  r.schemaURL,
+	}
+}
+
+// Attributes returns a copy of attributes from the resource in a sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+	if r == nil {
+		return ""
+	}
+	return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
+func (r *Resource) Iter() attribute.Iterator {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.Iter()
+}
+
+// Equal returns true when a Resource is equivalent to this Resource.
+func (r *Resource) Equal(eq *Resource) bool {
+	if r == nil {
+		r = Empty()
+	}
+	if eq == nil {
+		eq = Empty()
+	}
+	return r.Equivalent() == eq.Equivalent()
+}
+
+// Merge creates a new resource by combining resource a and b.
+//
+// If there are common keys between resource a and b, then the value
+// from resource b will overwrite the value from resource a, even
+// if resource b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the spec rules:
+// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge
+// If the resources have different non-empty schemaURL an empty resource and an error
+// will be returned.
+func Merge(a, b *Resource) (*Resource, error) {
+	if a == nil && b == nil {
+		return Empty(), nil
+	}
+	if a == nil {
+		return b, nil
+	}
+	if b == nil {
+		return a, nil
+	}
+
+	// Merge the schema URL.
+	var schemaURL string
+	switch {
+	case a.schemaURL == "":
+		schemaURL = b.schemaURL
+	case b.schemaURL == "":
+		schemaURL = a.schemaURL
+	case a.schemaURL == b.schemaURL:
+		schemaURL = a.schemaURL
+	default:
+		return Empty(), errMergeConflictSchemaURL
+	}
+
+	// Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
+	// Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
+	mi := attribute.NewMergeIterator(b.Set(), a.Set())
+	combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for mi.Next() {
+		combine = append(combine, mi.Attribute())
+	}
+	merged := NewWithAttributes(schemaURL, combine...)
+	return merged, nil
+}
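+
+// Editor's sketch (not upstream code): merging an environment-derived
+// resource with an explicit one; values are illustrative. Note that two
+// different non-empty schema URLs make Merge return Empty() and an error.
+//
+//	base := resource.Environment() // from OTEL_RESOURCE_ATTRIBUTES
+//	custom := resource.NewSchemaless(attribute.String("service.name", "example"))
+//	merged, err := resource.Merge(base, custom) // custom overrides base on shared keys
+//	if err != nil {
+//		// conflicting schema URLs
+//	}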
+
+// Empty returns an instance of Resource with no attributes. It is
+// equivalent to a `nil` Resource.
+func Empty() *Resource {
+	return &emptyResource
+}
+
+// Default returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+func Default() *Resource {
+	defaultResourceOnce.Do(func() {
+		var err error
+		defaultResource, err = Detect(
+			context.Background(),
+			defaultServiceNameDetector{},
+			fromEnv{},
+			telemetrySDK{},
+		)
+		if err != nil {
+			otel.Handle(err)
+		}
+		// If Detect did not return a valid resource, fall back to emptyResource.
+		if defaultResource == nil {
+			defaultResource = &emptyResource
+		}
+	})
+	return defaultResource
+}
+
+// Environment returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func Environment() *Resource {
+	detector := &fromEnv{}
+	resource, err := detector.Detect(context.Background())
+	if err != nil {
+		otel.Handle(err)
+	}
+	return resource
+}
+
+// Equivalent returns an object that can be compared for equality
+// between two resources. This value is suitable for use as a key in
+// a map.
+func (r *Resource) Equivalent() attribute.Distinct {
+	return r.Set().Equivalent()
+}
+
+// Set returns the equivalent *attribute.Set of this resource's attributes.
+func (r *Resource) Set() *attribute.Set {
+	if r == nil {
+		r = Empty()
+	}
+	return &r.attrs
+}
+
+// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
+// "...", "Value": ... } pairs in order sorted by key.
+func (r *Resource) MarshalJSON() ([]byte, error) {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.MarshalJSON()
+}
+
+// Len returns the number of unique key-values in this Resource.
+func (r *Resource) Len() int {
+	if r == nil {
+		return 0
+	}
+	return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+	if r == nil {
+		return ""
+	}
+	return r.attrs.Encoded(enc)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 000000000..a2d7db490
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,444 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/internal/env"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+	DefaultMaxQueueSize       = 2048
+	DefaultScheduleDelay      = 5000
+	DefaultExportTimeout      = 30000
+	DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a
+// BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+	// MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+	// queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
+	// The default value of MaxQueueSize is 2048.
+	MaxQueueSize int
+
+	// BatchTimeout is the maximum duration for constructing a batch. Processor
+	// forcefully sends available spans when timeout is reached.
+	// The default value of BatchTimeout is 5000 msec.
+	BatchTimeout time.Duration
+
+	// ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+	// is reached, the export will be cancelled.
+	// The default value of ExportTimeout is 30000 msec.
+	ExportTimeout time.Duration
+
+	// MaxExportBatchSize is the maximum number of spans to process in a single batch.
+	// If there are more than one batch worth of spans then it processes multiple batches
+	// of spans one batch after the other without any delay.
+	// The default value of MaxExportBatchSize is 512.
+	MaxExportBatchSize int
+
+	// BlockOnQueueFull blocks the OnEnd() and OnStart() methods if the queue is full
+	// AND BlockOnQueueFull is set to true.
+	// This blocking option should be used carefully, as it can severely affect the
+	// performance of an application.
+	BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a trace.Exporter when complete.
+type batchSpanProcessor struct {
+	e SpanExporter
+	o BatchSpanProcessorOptions
+
+	queue   chan ReadOnlySpan
+	dropped uint32
+
+	batch      []ReadOnlySpan
+	batchMutex sync.Mutex
+	timer      *time.Timer
+	stopWait   sync.WaitGroup
+	stopOnce   sync.Once
+	stopCh     chan struct{}
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+	maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+	maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+	if maxExportBatchSize > maxQueueSize {
+		if DefaultMaxExportBatchSize > maxQueueSize {
+			maxExportBatchSize = maxQueueSize
+		} else {
+			maxExportBatchSize = DefaultMaxExportBatchSize
+		}
+	}
+
+	o := BatchSpanProcessorOptions{
+		BatchTimeout:       time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+		ExportTimeout:      time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+		MaxQueueSize:       maxQueueSize,
+		MaxExportBatchSize: maxExportBatchSize,
+	}
+	for _, opt := range options {
+		opt(&o)
+	}
+	bsp := &batchSpanProcessor{
+		e:      exporter,
+		o:      o,
+		batch:  make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+		timer:  time.NewTimer(o.BatchTimeout),
+		queue:  make(chan ReadOnlySpan, o.MaxQueueSize),
+		stopCh: make(chan struct{}),
+	}
+
+	bsp.stopWait.Add(1)
+	go func() {
+		defer bsp.stopWait.Done()
+		bsp.processQueue()
+		bsp.drainQueue()
+	}()
+
+	return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+	// Do not enqueue spans if we are just going to drop them.
+	if bsp.e == nil {
+		return
+	}
+	bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	bsp.stopOnce.Do(func() {
+		wait := make(chan struct{})
+		go func() {
+			close(bsp.stopCh)
+			bsp.stopWait.Wait()
+			if bsp.e != nil {
+				if err := bsp.e.Shutdown(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+			close(wait)
+		}()
+		// Wait until the wait group is done or the context is cancelled
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	})
+	return err
+}
+
+type forceFlushSpan struct {
+	ReadOnlySpan
+	flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+	return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+	var err error
+	if bsp.e != nil {
+		flushCh := make(chan struct{})
+		if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
+			select {
+			case <-flushCh:
+			// Any items in the queue prior to ForceFlush being called have been processed.
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+
+		wait := make(chan error)
+		go func() {
+			wait <- bsp.exportSpans(ctx)
+			close(wait)
+		}()
+		// Wait until the export is finished or the context is cancelled/timed out
+		select {
+		case err = <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	}
+	return err
+}
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxQueueSize = size
+	}
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxExportBatchSize = size
+	}
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held span (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BatchTimeout = delay
+	}
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.ExportTimeout = timeout
+	}
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BlockOnQueueFull = true
+	}
+}
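+
+// Editor's sketch (not upstream code): combining the options above when
+// constructing a processor. The exporter variable and the option values
+// are illustrative assumptions.
+//
+//	bsp := sdktrace.NewBatchSpanProcessor(exporter,
+//		sdktrace.WithMaxQueueSize(4096),
+//		sdktrace.WithMaxExportBatchSize(256),
+//		sdktrace.WithBatchTimeout(2*time.Second),
+//		sdktrace.WithBlocking(),
+//	)
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))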
+
+// exportSpans is a subroutine of processing and draining the queue.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+	bsp.timer.Reset(bsp.o.BatchTimeout)
+
+	bsp.batchMutex.Lock()
+	defer bsp.batchMutex.Unlock()
+
+	if bsp.o.ExportTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
+		defer cancel()
+	}
+
+	if l := len(bsp.batch); l > 0 {
+		global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+		err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+		// A new batch is always created after exporting, even if the batch failed to be exported.
+		//
+		// It is up to the exporter to implement any type of retry logic if a batch is failing
+		// to be exported, since it is specific to the protocol and backend being sent to.
+		bsp.batch = bsp.batch[:0]
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// processQueue removes spans from the `queue` channel until the processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize,
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+	defer bsp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-bsp.stopCh:
+			return
+		case <-bsp.timer.C:
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-bsp.queue:
+			if ffs, ok := sd.(forceFlushSpan); ok {
+				close(ffs.flushed)
+				continue
+			}
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+			if shouldExport {
+				if !bsp.timer.Stop() {
+					<-bsp.timer.C
+				}
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue awaits any caller that had added to bsp.stopWait
+// to finish the enqueue, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-bsp.queue:
+			if sd == nil {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+				return
+			}
+
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			close(bsp.queue)
+		}
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+	ctx := context.TODO()
+	if bsp.o.BlockOnQueueFull {
+		bsp.enqueueBlockOnQueueFull(ctx, sd)
+	} else {
+		bsp.enqueueDrop(ctx, sd)
+	}
+}
+
+func recoverSendOnClosedChan() {
+	x := recover()
+	switch err := x.(type) {
+	case nil:
+		return
+	case runtime.Error:
+		if err.Error() == "send on closed channel" {
+			return
+		}
+	}
+	panic(x)
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	// This ensures the bsp.queue<- below does not panic as the
+	// processor shuts down.
+	defer recoverSendOnClosedChan()
+
+	select {
+	case <-bsp.stopCh:
+		return false
+	default:
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	// This ensures the bsp.queue<- below does not panic as the
+	// processor shuts down.
+	defer recoverSendOnClosedChan()
+
+	select {
+	case <-bsp.stopCh:
+		return false
+	default:
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	default:
+		atomic.AddUint32(&bsp.dropped, 1)
+	}
+	return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type         string
+		SpanExporter SpanExporter
+		Config       BatchSpanProcessorOptions
+	}{
+		Type:         "BatchSpanProcessor",
+		SpanExporter: bsp.e,
+		Config:       bsp.o,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 000000000..0285e99be
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
new file mode 100644
index 000000000..1e3b42675
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Event is a thing that happened during a Span's lifetime.
+type Event struct {
+	// Name is the name of this event
+	Name string
+
+	// Attributes describe the aspects of the event.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+
+	// Time at which this event was recorded.
+	Time time.Time
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
new file mode 100644
index 000000000..d1c86e59b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -0,0 +1,52 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// evictedQueue is a FIFO queue with a configurable capacity.
+type evictedQueue struct {
+	queue        []interface{}
+	capacity     int
+	droppedCount int
+}
+
+func newEvictedQueue(capacity int) evictedQueue {
+	// Do not pre-allocate queue, do this lazily.
+	return evictedQueue{capacity: capacity}
+}
+
+// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
+// queued value will be discarded and the drop count incremented.
+func (eq *evictedQueue) add(value interface{}) {
+	if eq.capacity == 0 {
+		eq.droppedCount++
+		return
+	}
+
+	if eq.capacity > 0 && len(eq.queue) == eq.capacity {
+		// Drop first-in while avoiding allocating more capacity to eq.queue.
+		copy(eq.queue[:eq.capacity-1], eq.queue[1:])
+		eq.queue = eq.queue[:eq.capacity-1]
+		eq.droppedCount++
+	}
+	eq.queue = append(eq.queue, value)
+}
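+
+// Editor's note: a worked example of the eviction behavior above. With
+// capacity 2, adding a third value drops the oldest one:
+//
+//	eq := newEvictedQueue(2)
+//	eq.add("a")
+//	eq.add("b")
+//	eq.add("c") // queue is now ["b", "c"], droppedCount == 1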
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 000000000..bba246041
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	crand "crypto/rand"
+	"encoding/binary"
+	"math/rand"
+	"sync"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewIDs returns a new trace and span ID.
+	NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+	NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
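+
+// Editor's sketch (not upstream code): a hedged outline of a custom
+// IDGenerator backed directly by crypto/rand; error handling is elided
+// for brevity.
+//
+//	type cryptoIDGenerator struct{}
+//
+//	func (cryptoIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+//		var tid trace.TraceID
+//		var sid trace.SpanID
+//		_, _ = crand.Read(tid[:])
+//		_, _ = crand.Read(sid[:])
+//		return tid, sid
+//	}
+//
+//	func (cryptoIDGenerator) NewSpanID(ctx context.Context, _ trace.TraceID) trace.SpanID {
+//		var sid trace.SpanID
+//		_, _ = crand.Read(sid[:])
+//		return sid
+//	}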
+
+type randomIDGenerator struct {
+	sync.Mutex
+	randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+	gen.Lock()
+	defer gen.Unlock()
+	sid := trace.SpanID{}
+	_, _ = gen.randSource.Read(sid[:])
+	return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+	gen.Lock()
+	defer gen.Unlock()
+	tid := trace.TraceID{}
+	_, _ = gen.randSource.Read(tid[:])
+	sid := trace.SpanID{}
+	_, _ = gen.randSource.Read(sid[:])
+	return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+	gen := &randomIDGenerator{}
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	gen.randSource = rand.New(rand.NewSource(rngSeed))
+	return gen
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 000000000..19cfea4ba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext trace.SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 000000000..201c17817
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,475 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig holds the configuration for a TracerProvider.
+type tracerProviderConfig struct {
+	// processors contains the collection of SpanProcessors that form the
+	// processing pipeline for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider are called at the
+	// start and end of a Span's lifecycle, in the order they are
+	// registered.
+	processors []SpanProcessor
+
+	// sampler is the default sampler used when creating new spans.
+	sampler Sampler
+
+	// idGenerator is used to generate all Span and Trace IDs when needed.
+	idGenerator IDGenerator
+
+	// spanLimits defines the attribute, event, and link limits for spans.
+	spanLimits SpanLimits
+
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this exporter.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+	return struct {
+		SpanProcessors  []SpanProcessor
+		SamplerType     string
+		IDGeneratorType string
+		SpanLimits      SpanLimits
+		Resource        *resource.Resource
+	}{
+		SpanProcessors:  cfg.processors,
+		SamplerType:     fmt.Sprintf("%T", cfg.sampler),
+		IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+		SpanLimits:      cfg.spanLimits,
+		Resource:        cfg.resource,
+	}
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+	mu             sync.Mutex
+	namedTracer    map[instrumentation.Scope]*tracer
+	spanProcessors atomic.Value
+	isShutdown     bool
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the TracerProvider.
+	sampler     Sampler
+	idGenerator IDGenerator
+	spanLimits  SpanLimits
+	resource    *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+//   - a ParentBased(AlwaysSample) Sampler
+//   - a random number IDGenerator
+//   - the resource.Default() Resource
+//   - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+	o := tracerProviderConfig{
+		spanLimits: NewSpanLimits(),
+	}
+	o = applyTracerProviderEnvConfigs(o)
+
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+
+	o = ensureValidTracerProviderConfig(o)
+
+	tp := &TracerProvider{
+		namedTracer: make(map[instrumentation.Scope]*tracer),
+		sampler:     o.sampler,
+		idGenerator: o.idGenerator,
+		spanLimits:  o.spanLimits,
+		resource:    o.resource,
+	}
+	global.Info("TracerProvider created", "config", o)
+
+	spss := spanProcessorStates{}
+	for _, sp := range o.processors {
+		spss = append(spss, newSpanProcessorState(sp))
+	}
+	tp.spanProcessors.Store(spss)
+
+	return tp
+}
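+
+// Editor's sketch (not upstream code): typical construction from user
+// code, where the sdk/trace package is commonly aliased. The exporter
+// and resource variables are illustrative assumptions.
+//
+//	import sdktrace "go.opentelemetry.io/otel/sdk/trace"
+//
+//	tp := sdktrace.NewTracerProvider(
+//		sdktrace.WithBatcher(exporter),
+//		sdktrace.WithResource(res),
+//		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))),
+//	)
+//	otel.SetTracerProvider(tp)
+//	defer func() { _ = tp.Shutdown(context.Background()) }()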
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist, it is created; otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, defaultTracerName is used instead.
+//
+// This method is safe to be called concurrently.
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	c := trace.NewTracerConfig(opts...)
+
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if name == "" {
+		name = defaultTracerName
+	}
+	is := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+	t, ok := p.namedTracer[is]
+	if !ok {
+		t = &tracer{
+			provider:             p,
+			instrumentationScope: is,
+		}
+		p.namedTracer[is] = t
+		global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL())
+	}
+	return t
+}
+
+// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.isShutdown {
+		return
+	}
+	newSPS := spanProcessorStates{}
+	newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...)
+	newSPS = append(newSPS, newSpanProcessorState(sp))
+	p.spanProcessors.Store(newSPS)
+}
+
+// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.isShutdown {
+		return
+	}
+	old := p.spanProcessors.Load().(spanProcessorStates)
+	if len(old) == 0 {
+		return
+	}
+	spss := spanProcessorStates{}
+	spss = append(spss, old...)
+
+	// stop the span processor if it is started and remove it from the list
+	var stopOnce *spanProcessorState
+	var idx int
+	for i, sps := range spss {
+		if sps.sp == sp {
+			stopOnce = sps
+			idx = i
+		}
+	}
+	if stopOnce != nil {
+		stopOnce.state.Do(func() {
+			if err := sp.Shutdown(context.Background()); err != nil {
+				otel.Handle(err)
+			}
+		})
+	}
+	if len(spss) > 1 {
+		copy(spss[idx:], spss[idx+1:])
+	}
+	spss[len(spss)-1] = nil
+	spss = spss[:len(spss)-1]
+
+	p.spanProcessors.Store(spss)
+}
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+	spss := p.spanProcessors.Load().(spanProcessorStates)
+	if len(spss) == 0 {
+		return nil
+	}
+
+	for _, sps := range spss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := sps.sp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Shutdown shuts down the TracerProvider. All registered span processors are shut
+// down in the order they were registered, and any held computational resources are released.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+	spss := p.spanProcessors.Load().(spanProcessorStates)
+	if len(spss) == 0 {
+		return nil
+	}
+
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.isShutdown = true
+
+	var retErr error
+	for _, sps := range spss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		sps.state.Do(func() {
+			err = sps.sp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%v; %v", retErr, err)
+			}
+		}
+	}
+	p.spanProcessors.Store(spanProcessorStates{})
+	return retErr
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+	apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+	return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computational resource usage overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+	return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+	return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.processors = append(cfg.processors, sp)
+		return cfg
+	})
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		var err error
+		cfg.resource, err = resource.Merge(resource.Environment(), r)
+		if err != nil {
+			otel.Handle(err)
+		}
+		return cfg
+	})
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if g != nil {
+			cfg.idGenerator = g
+		}
+		return cfg
+	})
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
+// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
+// and the sampler is not configured through environment variables or the environment
+// contains invalid/unsupported configuration, the TracerProvider will use a
+// ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if s != nil {
+			cfg.sampler = s
+		}
+		return cfg
+	})
+}
+
+// WithSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
+// created by a Tracer from the TracerProvider.
+//
+// If any field of sl is zero or negative it will be replaced with the default
+// value for that field.
+//
+// If this or WithRawSpanLimits are not provided, the TracerProvider will use
+// the limits defined by environment variables, or the defaults if unset.
+// Refer to the NewSpanLimits documentation for information about this
+// relationship.
+//
+// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
+// and zero limits. This option will be kept until the next major version
+// incremented release.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+	if sl.AttributeValueLengthLimit <= 0 {
+		sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+	}
+	if sl.AttributeCountLimit <= 0 {
+		sl.AttributeCountLimit = DefaultAttributeCountLimit
+	}
+	if sl.EventCountLimit <= 0 {
+		sl.EventCountLimit = DefaultEventCountLimit
+	}
+	if sl.AttributePerEventCountLimit <= 0 {
+		sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+	}
+	if sl.LinkCountLimit <= 0 {
+		sl.LinkCountLimit = DefaultLinkCountLimit
+	}
+	if sl.AttributePerLinkCountLimit <= 0 {
+		sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+	}
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = sl
+		return cfg
+	})
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = limits
+		return cfg
+	})
+}
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+	for _, opt := range tracerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+	var opts []TracerProviderOption
+
+	sampler, err := samplerFromEnv()
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	if sampler != nil {
+		opts = append(opts, WithSampler(sampler))
+	}
+
+	return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+	if cfg.sampler == nil {
+		cfg.sampler = ParentBased(AlwaysSample())
+	}
+	if cfg.idGenerator == nil {
+		cfg.idGenerator = defaultIDGenerator()
+	}
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+	return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 000000000..02053b318
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	tracesSamplerKey    = "OTEL_TRACES_SAMPLER"
+	tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+	samplerAlwaysOn                = "always_on"
+	samplerAlwaysOff               = "always_off"
+	samplerTraceIDRatio            = "traceidratio"
+	samplerParentBasedAlwaysOn     = "parentbased_always_on"
+	samplerParentBasedAlwaysOff    = "parentbased_always_off"
+	samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+	return fmt.Sprintf("unsupported sampler: %s", string(e))
+}
+
+var (
+	errNegativeTraceIDRatio       = errors.New("invalid trace ID ratio: less than 0.0")
+	errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+	parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+	return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error())
+}
+
+func (e samplerArgParseError) Unwrap() error {
+	return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+	sampler, ok := os.LookupEnv(tracesSamplerKey)
+	if !ok {
+		return nil, nil
+	}
+
+	sampler = strings.ToLower(strings.TrimSpace(sampler))
+	samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+	samplerArg = strings.TrimSpace(samplerArg)
+
+	switch sampler {
+	case samplerAlwaysOn:
+		return AlwaysSample(), nil
+	case samplerAlwaysOff:
+		return NeverSample(), nil
+	case samplerTraceIDRatio:
+		if !hasSamplerArg {
+			return TraceIDRatioBased(1.0), nil
+		}
+		return parseTraceIDRatio(samplerArg)
+	case samplerParentBasedAlwaysOn:
+		return ParentBased(AlwaysSample()), nil
+	case samplerParentBasedAlwaysOff:
+		return ParentBased(NeverSample()), nil
+	case samplerParentBasedTraceIDRatio:
+		if !hasSamplerArg {
+			return ParentBased(TraceIDRatioBased(1.0)), nil
+		}
+		ratio, err := parseTraceIDRatio(samplerArg)
+		return ParentBased(ratio), err
+	default:
+		return nil, errUnsupportedSampler(sampler)
+	}
+}
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+	v, err := strconv.ParseFloat(arg, 64)
+	if err != nil {
+		return TraceIDRatioBased(1.0), samplerArgParseError{err}
+	}
+	if v < 0.0 {
+		return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+	}
+	if v > 1.0 {
+		return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+	}
+
+	return TraceIDRatioBased(v), nil
+}
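+
+// Editor's note: a hedged illustration of the environment-driven
+// configuration parsed above. With, for example,
+//
+//	OTEL_TRACES_SAMPLER=parentbased_traceidratio
+//	OTEL_TRACES_SAMPLER_ARG=0.25
+//
+// set in the environment, NewTracerProvider ends up configured with
+// ParentBased(TraceIDRatioBased(0.25)).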
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 000000000..5ee9715d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,301 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ShouldSample returns a SamplingResult based on a decision made from the
+	// passed parameters.
+	ShouldSample(parameters SamplingParameters) SamplingResult
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Description returns information describing the Sampler.
+	Description() string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+	ParentContext context.Context
+	TraceID       trace.TraceID
+	Name          string
+	Kind          trace.SpanKind
+	Attributes    []attribute.KeyValue
+	Links         []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+	// Drop will not record the span and all attributes/events will be dropped.
+	Drop SamplingDecision = iota
+
+	// RecordOnly indicates the span's `IsRecording() == true`, but the
+	// `Sampled` flag *must not* be set.
+	RecordOnly
+
+	// RecordAndSample indicates the span's `IsRecording() == true` and the
+	// `Sampled` flag *must* be set.
+	RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
+type SamplingResult struct {
+	Decision   SamplingDecision
+	Attributes []attribute.KeyValue
+	Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+	traceIDUpperBound uint64
+	description       string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
+	if x < ts.traceIDUpperBound {
+		return SamplingResult{
+			Decision:   RecordAndSample,
+			Tracestate: psc.TraceState(),
+		}
+	}
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: psc.TraceState(),
+	}
+}
+
+func (ts traceIDRatioSampler) Description() string {
+	return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+	if fraction >= 1 {
+		return AlwaysSample()
+	}
+
+	if fraction <= 0 {
+		fraction = 0
+	}
+
+	return &traceIDRatioSampler{
+		traceIDUpperBound: uint64(fraction * (1 << 63)),
+		description:       fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+	}
+}
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   RecordAndSample,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOnSampler) Description() string {
+	return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+	return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOffSampler) Description() string {
+	return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+	return alwaysOffSampler{}
+}
+
+// ParentBased returns a composite sampler which behaves differently
+// based on the parent of the span. If the span has no parent,
+// the root(Sampler) is used to make the sampling decision. If the span has
+// a parent, depending on whether the parent is remote and whether it
+// is sampled, one of the following samplers will apply:
+//   - remoteParentSampled(Sampler) (default: AlwaysOn)
+//   - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+//   - localParentSampled(Sampler) (default: AlwaysOn)
+//   - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+	return parentBased{
+		root:   root,
+		config: configureSamplersForParentBased(samplers),
+	}
+}
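+
+// Editor's sketch (not upstream code): overriding one of the four
+// composite cases; the ratio value is an illustrative assumption.
+//
+//	s := sdktrace.ParentBased(
+//		sdktrace.AlwaysSample(),
+//		sdktrace.WithRemoteParentSampled(sdktrace.TraceIDRatioBased(0.5)),
+//	)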
+
+type parentBased struct {
+	root   Sampler
+	config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+	c := samplerConfig{
+		remoteParentSampled:    AlwaysSample(),
+		remoteParentNotSampled: NeverSample(),
+		localParentSampled:     AlwaysSample(),
+		localParentNotSampled:  NeverSample(),
+	}
+
+	for _, so := range samplers {
+		c = so.apply(c)
+	}
+
+	return c
+}
+
+// samplerConfig is a group of options for parentBased sampler.
+type samplerConfig struct {
+	remoteParentSampled, remoteParentNotSampled Sampler
+	localParentSampled, localParentNotSampled   Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+	apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentSampled = o.s
+	return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentNotSampled = o.s
+	return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+	s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentSampled = o.s
+	return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentNotSampled = o.s
+	return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	if psc.IsValid() {
+		if psc.IsRemote() {
+			if psc.IsSampled() {
+				return pb.config.remoteParentSampled.ShouldSample(p)
+			}
+			return pb.config.remoteParentNotSampled.ShouldSample(p)
+		}
+
+		if psc.IsSampled() {
+			return pb.config.localParentSampled.ShouldSample(p)
+		}
+		return pb.config.localParentNotSampled.ShouldSample(p)
+	}
+	return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+	return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+		"remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+		pb.root.Description(),
+		pb.config.remoteParentSampled.Description(),
+		pb.config.remoteParentNotSampled.Description(),
+		pb.config.localParentSampled.Description(),
+		pb.config.localParentNotSampled.Description(),
+	)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 000000000..e8530a959
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,141 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a trace.Exporter immediately.
+type simpleSpanProcessor struct {
+	exporterMu sync.RWMutex
+	exporter   SpanExporter
+	stopOnce   sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or
+// showing examples of other features, but it will be slow and have a high
+// computational resource usage overhead. The BatchSpanProcessor is recommended
+// for production use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+	ssp := &simpleSpanProcessor{
+		exporter: exporter,
+	}
+	return ssp
+}
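+
+// Editor's sketch (not upstream code): wiring a simple processor for
+// local debugging. The stdouttrace exporter
+// (go.opentelemetry.io/otel/exporters/stdout/stdouttrace) is an assumed
+// choice here.
+//
+//	exp, err := stdouttrace.New()
+//	if err != nil {
+//		panic(err) // illustrative handling
+//	}
+//	tp := sdktrace.NewTracerProvider(
+//		sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(exp)),
+//	)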
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+	ssp.exporterMu.RLock()
+	defer ssp.exporterMu.RUnlock()
+
+	if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+		if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+		// A closure is used to keep reference to the exporter and then the
+		// field is zeroed. This ensures the simpleSpanProcessor is shut down
+		// before the exporter. This order is important as it avoids a
+		// potential deadlock. If the exporter shut down operation generates a
+		// span, that span would need to be exported. Meaning, OnEnd would be
+		// called and try acquiring the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down
+			// and the context to be done simultaneously. In that case this
+			// outer select statement will randomly choose a case. This will
+			// result in a different returned error for similar scenarios.
+			// Instead, double check if the exporter shut down at the same
+			// time and return that error if so. This will ensure consistency
+			// as well as ensure the caller knows the exporter shut down
+			// successfully (they can already determine if the deadline is
+			// expired given they passed the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
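
A minimal sketch of registering the SimpleSpanProcessor defined above. The stdouttrace exporter is an assumption (it is not part of this file) used here only as a convenient SpanExporter; as the doc comment warns, prefer the BatchSpanProcessor in production.

```go
package main

import (
	"log"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func newDebugProvider() *sdktrace.TracerProvider {
	exp, err := stdouttrace.New()
	if err != nil {
		log.Fatal(err)
	}
	// Every ended, sampled span is exported synchronously from OnEnd.
	return sdktrace.NewTracerProvider(
		sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(exp)),
	)
}
```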
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 000000000..0349b2f19
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,144 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+	name                  string
+	spanContext           trace.SpanContext
+	parent                trace.SpanContext
+	spanKind              trace.SpanKind
+	startTime             time.Time
+	endTime               time.Time
+	attributes            []attribute.KeyValue
+	events                []Event
+	links                 []Link
+	status                Status
+	childSpanCount        int
+	droppedAttributeCount int
+	droppedEventCount     int
+	droppedLinkCount      int
+	resource              *resource.Resource
+	instrumentationScope  instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+	return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+	return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+	return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+	return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+	return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+	return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+	return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+	return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+	return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+	return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+	return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+	return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+	return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+	return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+	return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+	return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 000000000..9fb483a99
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,828 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	rt "runtime/trace"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/internal"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+	// Name returns the name of the span.
+	Name() string
+	// SpanContext returns the unique SpanContext that identifies the span.
+	SpanContext() trace.SpanContext
+	// Parent returns the unique SpanContext that identifies the parent of the
+	// span if one exists. If the span has no parent the returned SpanContext
+	// will be invalid.
+	Parent() trace.SpanContext
+	// SpanKind returns the role the span plays in a Trace.
+	SpanKind() trace.SpanKind
+	// StartTime returns the time the span started recording.
+	StartTime() time.Time
+	// EndTime returns the time the span stopped recording. It will be zero if
+	// the span has not ended.
+	EndTime() time.Time
+	// Attributes returns the defining attributes of the span.
+	// The order of the returned attributes is not guaranteed to be stable across invocations.
+	Attributes() []attribute.KeyValue
+	// Links returns all the links the span has to other spans.
+	Links() []Link
+	// Events returns all the events that occurred within the span's
+	// lifetime.
+	Events() []Event
+	// Status returns the span's status.
+	Status() Status
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the span.
+	InstrumentationScope() instrumentation.Scope
+	// InstrumentationLibrary returns information about the instrumentation
+	// library that created the span.
+	// Deprecated: please use InstrumentationScope instead.
+	InstrumentationLibrary() instrumentation.Library
+	// Resource returns information about the entity that produced the span.
+	Resource() *resource.Resource
+	// DroppedAttributes returns the number of attributes dropped by the span
+	// due to limits being reached.
+	DroppedAttributes() int
+	// DroppedLinks returns the number of links dropped by the span due to
+	// limits being reached.
+	DroppedLinks() int
+	// DroppedEvents returns the number of events dropped by the span due to
+	// limits being reached.
+	DroppedEvents() int
+	// ChildSpanCount returns the count of spans that consider the span a
+	// direct parent.
+	ChildSpanCount() int
+
+	// A private method to prevent users implementing the
+	// interface and so future additions to it will not
+	// violate compatibility.
+	private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+	trace.Span
+	ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+	// mu protects the contents of this span.
+	mu sync.Mutex
+
+	// parent holds the parent span of this span as a trace.SpanContext.
+	parent trace.SpanContext
+
+	// spanKind represents the kind of this span as a trace.SpanKind.
+	spanKind trace.SpanKind
+
+	// name is the name of this span.
+	name string
+
+	// startTime is the time at which this span was started.
+	startTime time.Time
+
+	// endTime is the time at which this span was ended. It contains the zero
+	// value of time.Time until the span is ended.
+	endTime time.Time
+
+	// status is the status of this span.
+	status Status
+
+	// childSpanCount holds the number of child spans created for this span.
+	childSpanCount int
+
+	// spanContext holds the SpanContext of this span.
+	spanContext trace.SpanContext
+
+	// attributes is a collection of user provided key/values. The collection
+	// is constrained by a configurable maximum held by the parent
+	// TracerProvider. Attributes the user attempts to add after this maximum
+	// is reached are dropped. The number of dropped attributes is tracked and
+	// reported in the ReadOnlySpan exported when the span ends.
+	attributes        []attribute.KeyValue
+	droppedAttributes int
+
+	// events are stored in FIFO queue capped by configured limit.
+	events evictedQueue
+
+	// links are stored in FIFO queue capped by configured limit.
+	links evictedQueue
+
+	// executionTracerTaskEnd ends the execution tracer span.
+	executionTracerTaskEnd func()
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+}
+
+var _ ReadWriteSpan = (*recordingSpan)(nil)
+var _ runtimeTracer = (*recordingSpan)(nil)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+	if s == nil {
+		return false
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.status.Code > code {
+		return
+	}
+
+	status := Status{Code: code}
+	if code == codes.Error {
+		status.Description = description
+	}
+
+	s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	limit := s.tracer.provider.spanLimits.AttributeCountLimit
+	if limit == 0 {
+		// No attributes allowed.
+		s.droppedAttributes += len(attributes)
+		return
+	}
+
+	// If adding these attributes could exceed the capacity of s, perform
+	// de-duplication and truncation while adding to avoid over-allocation.
+	if limit > 0 && len(s.attributes)+len(attributes) > limit {
+		s.addOverCapAttrs(limit, attributes)
+		return
+	}
+
+	// Otherwise, add without deduplication. When attributes are read they
+	// will be deduplicated, optimizing the operation.
+	for _, a := range attributes {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.droppedAttributes++
+			continue
+		}
+		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+		s.attributes = append(s.attributes, a)
+	}
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+	// In order to not allocate more capacity to s.attributes than needed,
+	// prune and truncate this addition of attributes while adding.
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+
+	// Now that s.attributes is deduplicated, adding unique attributes up to
+	// the capacity of s will not over allocate s.attributes.
+	for _, a := range attrs {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.droppedAttributes++
+			continue
+		}
+
+		if idx, ok := exists[a.Key]; ok {
+			// Perform all updates before dropping, even when at capacity.
+			s.attributes[idx] = a
+			continue
+		}
+
+		if len(s.attributes) >= limit {
+			// Do not just drop all of the remaining attributes, make sure
+			// updates are checked and performed.
+			s.droppedAttributes++
+		} else {
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+			s.attributes = append(s.attributes, a)
+			exists[a.Key] = len(s.attributes) - 1
+		}
+	}
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+	if limit < 0 {
+		return attr
+	}
+	switch attr.Value.Type() {
+	case attribute.STRING:
+		if v := attr.Value.AsString(); len(v) > limit {
+			return attr.Key.String(safeTruncate(v, limit))
+		}
+	case attribute.STRINGSLICE:
+		v := attr.Value.AsStringSlice()
+		for i := range v {
+			if len(v[i]) > limit {
+				v[i] = safeTruncate(v[i], limit)
+			}
+		}
+		return attr.Key.StringSlice(v)
+	}
+	return attr
+}
+
+// safeTruncate truncates the string and guarantees valid UTF-8 is returned.
+func safeTruncate(input string, limit int) string {
+	if trunc, ok := safeTruncateValidUTF8(input, limit); ok {
+		return trunc
+	}
+	trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit)
+	return trunc
+}
+
+// safeTruncateValidUTF8 returns a copy of the input string safely truncated to
+// limit. The truncation is ensured to occur at the bounds of complete UTF-8
+// characters. If invalid encoding of UTF-8 is encountered, input is returned
+// with false, otherwise, the truncated input will be returned with true.
+func safeTruncateValidUTF8(input string, limit int) (string, bool) {
+	for cnt := 0; cnt <= limit; {
+		r, size := utf8.DecodeRuneInString(input[cnt:])
+		if r == utf8.RuneError {
+			return input, false
+		}
+
+		if cnt+size > limit {
+			return input[:cnt], true
+		}
+		cnt += size
+	}
+	return input, true
+}
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanOption currently supported is WithTimestamp which will set the
+// end time for a Span's life-cycle.
+//
+// If this method is called while panicking an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+	// Do not start by checking if the span is being recorded which requires
+	// acquiring a lock. Make a minimal check that the span is not nil.
+	if s == nil {
+		return
+	}
+
+	// Store the end time as soon as possible to avoid artificially increasing
+	// the span's duration in case some operation below takes a while.
+	et := internal.MonotonicEndTime(s.startTime)
+
+	// Do a relatively expensive check now that we have an end time to see if
+	// we need to do any more processing.
+	if !s.IsRecording() {
+		return
+	}
+
+	config := trace.NewSpanEndConfig(options...)
+	if recovered := recover(); recovered != nil {
+		// Record but don't stop the panic.
+		defer panic(recovered)
+		opts := []trace.EventOption{
+			trace.WithAttributes(
+				semconv.ExceptionType(typeStr(recovered)),
+				semconv.ExceptionMessage(fmt.Sprint(recovered)),
+			),
+		}
+
+		if config.StackTrace() {
+			opts = append(opts, trace.WithAttributes(
+				semconv.ExceptionStacktrace(recordStackTrace()),
+			))
+		}
+
+		s.addEvent(semconv.ExceptionEventName, opts...)
+	}
+
+	if s.executionTracerTaskEnd != nil {
+		s.executionTracerTaskEnd()
+	}
+
+	s.mu.Lock()
+	// Setting endTime to non-zero marks the span as ended and not recording.
+	if config.Timestamp().IsZero() {
+		s.endTime = et
+	} else {
+		s.endTime = config.Timestamp()
+	}
+	s.mu.Unlock()
+
+	sps := s.tracer.provider.spanProcessors.Load().(spanProcessorStates)
+	if len(sps) == 0 {
+		return
+	}
+	snap := s.snapshot()
+	for _, sp := range sps {
+		sp.sp.OnEnd(snap)
+	}
+}
+
+// RecordError will record err as a span event for this span. An additional
+// call to SetStatus is required if the Status of the Span should be set to
+// Error; this method does not change the Span status. If this span is not
+// being recorded or err is nil, then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.IsRecording() {
+		return
+	}
+
+	opts = append(opts, trace.WithAttributes(
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	))
+
+	c := trace.NewEventConfig(opts...)
+	if c.StackTrace() {
+		opts = append(opts, trace.WithAttributes(
+			semconv.ExceptionStacktrace(recordStackTrace()),
+		))
+	}
+
+	s.addEvent(semconv.ExceptionEventName, opts...)
+}
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if !s.IsRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.events.add(e)
+	s.mu.Unlock()
+}
+
+// SetName sets the name of this span. If this span is not being recorded,
+// then this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dedupeAttrs()
+	return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+	// Use the fact that slices share the same backing array.
+	unique := s.attributes[:0]
+	for _, a := range s.attributes {
+		if idx, ok := (*record)[a.Key]; ok {
+			unique[idx] = a
+		} else {
+			unique = append(unique, a)
+			(*record)[a.Key] = len(unique) - 1
+		}
+	}
+	// s.attributes have element types of attribute.KeyValue. These types are
+	// not pointers and they themselves do not contain pointer fields,
+	// therefore the duplicate values do not need to be zeroed for them to be
+	// garbage collected.
+	s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.links.queue) == 0 {
+		return []Link{}
+	}
+	return s.interfaceArrayToLinksArray()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.events.queue) == 0 {
+		return []Event{}
+	}
+	return s.interfaceArrayToEventArray()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) addLink(link trace.Link) {
+	if !s.IsRecording() || !link.SpanContext.IsValid() {
+		return
+	}
+
+	l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		l.DroppedAttributeCount = len(l.Attributes)
+		l.Attributes = nil
+	} else if limit > 0 && len(l.Attributes) > limit {
+		l.DroppedAttributeCount = len(l.Attributes) - limit
+		l.Attributes = l.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.links.add(l)
+	s.mu.Unlock()
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+	return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+	var sd snapshot
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	sd.endTime = s.endTime
+	sd.instrumentationScope = s.tracer.instrumentationScope
+	sd.name = s.name
+	sd.parent = s.parent
+	sd.resource = s.tracer.provider.resource
+	sd.spanContext = s.spanContext
+	sd.spanKind = s.spanKind
+	sd.startTime = s.startTime
+	sd.status = s.status
+	sd.childSpanCount = s.childSpanCount
+
+	if len(s.attributes) > 0 {
+		s.dedupeAttrs()
+		sd.attributes = s.attributes
+	}
+	sd.droppedAttributeCount = s.droppedAttributes
+	if len(s.events.queue) > 0 {
+		sd.events = s.interfaceArrayToEventArray()
+		sd.droppedEventCount = s.events.droppedCount
+	}
+	if len(s.links.queue) > 0 {
+		sd.links = s.interfaceArrayToLinksArray()
+		sd.droppedLinkCount = s.links.droppedCount
+	}
+	return &sd
+}
+
+func (s *recordingSpan) interfaceArrayToLinksArray() []Link {
+	linkArr := make([]Link, 0)
+	for _, value := range s.links.queue {
+		linkArr = append(linkArr, value.(Link))
+	}
+	return linkArr
+}
+
+func (s *recordingSpan) interfaceArrayToEventArray() []Event {
+	eventArr := make([]Event, 0)
+	for _, value := range s.events.queue {
+		eventArr = append(eventArr, value.(Event))
+	}
+	return eventArr
+}
+
+func (s *recordingSpan) addChild() {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	s.childSpanCount++
+	s.mu.Unlock()
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+	if !rt.IsEnabled() {
+		// Avoid additional overhead if runtime/trace is not enabled.
+		return ctx
+	}
+	nctx, task := rt.NewTask(ctx, s.name)
+
+	s.mu.Lock()
+	s.executionTracerTaskEnd = task.End
+	s.mu.Unlock()
+
+	return nctx
+}
+
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+	sc     trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+	return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+	return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+	// Code is an identifier of a Spans state classification.
+	Code codes.Code
+	// Description is a user hint about why that status was set. It is only
+	// applicable when Code is Error.
+	Description string
+}
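
The RecordError and SetStatus semantics documented above are easy to misread: recording an error does not mark the span as failed. A short sketch (illustrative, not part of this patch):

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel/codes"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func doWork(ctx context.Context, tp *sdktrace.TracerProvider) error {
	_, span := tp.Tracer("example").Start(ctx, "do-work")
	defer span.End()

	if err := errors.New("boom"); err != nil {
		span.RecordError(err)                    // adds an exception event only
		span.SetStatus(codes.Error, err.Error()) // status must be set separately
		return err
	}
	return nil
}
```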
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 000000000..9fb3d6eac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ExportSpans exports a batch of spans.
+	//
+	// This function is called synchronously, so there is no concurrency
+	// safety requirement. However, due to the synchronous calling pattern,
+	// it is critical that all timeouts and cancellations contained in the
+	// passed context must be honored.
+	//
+	// Any retry logic must be contained in this function. The SDK that
+	// calls this function will not implement any retry logic. All errors
+	// returned by this function are considered unrecoverable and will be
+	// reported to a configured error Handler.
+	ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown notifies the exporter of a pending halt to operations. The
+	// exporter is expected to perform any cleanup or synchronization it
+	// requires while honoring all timeouts and cancellations contained in
+	// the passed context.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
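
A sketch of a custom SpanExporter satisfying the interface above; it only logs a few ReadOnlySpan fields and performs no retries (per the contract, any retry logic would live inside ExportSpans).

```go
package main

import (
	"context"
	"log"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

type logExporter struct{}

var _ sdktrace.SpanExporter = logExporter{}

// ExportSpans logs each span; any error returned here would be routed
// to the SDK's configured error Handler.
func (logExporter) ExportSpans(_ context.Context, spans []sdktrace.ReadOnlySpan) error {
	for _, s := range spans {
		log.Printf("span %q trace=%s duration=%s",
			s.Name(), s.SpanContext().TraceID(), s.EndTime().Sub(s.StartTime()))
	}
	return nil
}

// Shutdown has no resources to release in this sketch.
func (logExporter) Shutdown(context.Context) error { return nil }
```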
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 000000000..aa4d4221d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,125 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+	// DefaultAttributeValueLengthLimit is the default maximum allowed
+	// attribute value length, unlimited.
+	DefaultAttributeValueLengthLimit = -1
+
+	// DefaultAttributeCountLimit is the default maximum number of attributes
+	// a span can have.
+	DefaultAttributeCountLimit = 128
+
+	// DefaultEventCountLimit is the default maximum number of events a span
+	// can have.
+	DefaultEventCountLimit = 128
+
+	// DefaultLinkCountLimit is the default maximum number of links a span can
+	// have.
+	DefaultLinkCountLimit = 128
+
+	// DefaultAttributePerEventCountLimit is the default maximum number of
+	// attributes a span event can have.
+	DefaultAttributePerEventCountLimit = 128
+
+	// DefaultAttributePerLinkCountLimit is the default maximum number of
+	// attributes a span link can have.
+	DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+	// AttributeValueLengthLimit is the maximum allowed attribute value length.
+	//
+	// This limit only applies to string and string slice attribute values.
+	// Any string longer than this value will be truncated to this length.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeValueLengthLimit int
+
+	// AttributeCountLimit is the maximum allowed span attribute count. Any
+	// attribute added to a span once this limit is reached will be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count. Once this
+	// limit is reached, any further event added to the span will evict the
+	// oldest event from the queue.
+	//
+	// Setting this to zero means no events will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count. Once this limit
+	// is reached, any further link added to the span will evict the oldest
+	// link from the queue.
+	//
+	// Setting this to zero means no links will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum number of attributes allowed
+	// per span event. Any attribute added after this limit is reached will
+	// be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for events.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum number of attributes allowed
+	// per span link. Any attribute added after this limit is reached will
+	// be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for links.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+	return SpanLimits{
+		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
+		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
+		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
+		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+	}
+}
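
A hedged sketch of overriding the environment-driven defaults returned by NewSpanLimits; WithRawSpanLimits is assumed to be available in the vendored SDK version, and the limit values are arbitrary examples.

```go
package main

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

func newLimitedProvider() *sdktrace.TracerProvider {
	limits := sdktrace.NewSpanLimits()     // env vars, falling back to defaults
	limits.AttributeValueLengthLimit = 256 // truncate long string values
	limits.AttributeCountLimit = 64        // drop attributes past the 64th

	return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
}
```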
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 000000000..e6ae19352
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,72 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors registered with a TracerProvider are called at the start
+// and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnStart is called when a span is started. It is called synchronously
+	// and should not block.
+	OnStart(parent context.Context, s ReadWriteSpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEnd is called when a span is finished. It is called synchronously
+	// and hence should not block.
+	OnEnd(s ReadOnlySpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnStart, OnEnd, or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored, this
+	// should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports all ended spans that have not yet been exported to
+	// the configured Exporter. It should only be called when absolutely
+	// necessary, such as when using a FaaS provider that may suspend the
+	// process after an invocation, but before the Processor can export the
+	// completed spans.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+	sp    SpanProcessor
+	state *sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+	return &spanProcessorState{sp: sp, state: &sync.Once{}}
+}
+
+type spanProcessorStates []*spanProcessorState
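
A minimal custom SpanProcessor implementing the interface above; it stamps an attribute on every started span (the attribute key is a made-up example) and treats the rest of the lifecycle as a no-op.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

type taggingProcessor struct{}

var _ sdktrace.SpanProcessor = taggingProcessor{}

// OnStart mutates the span through the ReadWriteSpan interface, which is
// only possible while the span is starting.
func (taggingProcessor) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
	s.SetAttributes(attribute.String("example.tag", "demo"))
}

func (taggingProcessor) OnEnd(sdktrace.ReadOnlySpan)      {}
func (taggingProcessor) Shutdown(context.Context) error   { return nil }
func (taggingProcessor) ForceFlush(context.Context) error { return nil }
```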
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 000000000..f17d924b8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,161 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type tracer struct {
+	provider             *TracerProvider
+	instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+	config := trace.NewSpanStartConfig(options...)
+
+	if ctx == nil {
+		// Prevent trace.ContextWithSpan from panicking.
+		ctx = context.Background()
+	}
+
+	// For local spans created by this SDK, track child span count.
+	if p := trace.SpanFromContext(ctx); p != nil {
+		if sdkSpan, ok := p.(*recordingSpan); ok {
+			sdkSpan.addChild()
+		}
+	}
+
+	s := tr.newSpan(ctx, name, &config)
+	if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+		sps := tr.provider.spanProcessors.Load().(spanProcessorStates)
+		for _, sp := range sps {
+			sp.sp.OnStart(ctx, rw)
+		}
+	}
+	if rtt, ok := s.(runtimeTracer); ok {
+		ctx = rtt.runtimeTrace(ctx)
+	}
+
+	return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+	// runtimeTrace starts a "runtime/trace".Task for the span and
+	// returns a context containing the task.
+	runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+	// If told explicitly to make this a new root use a zero value SpanContext
+	// as a parent which contains an invalid trace ID and is not remote.
+	var psc trace.SpanContext
+	if config.NewRoot() {
+		ctx = trace.ContextWithSpanContext(ctx, psc)
+	} else {
+		psc = trace.SpanContextFromContext(ctx)
+	}
+
+	// If there is a valid parent trace ID, use it to ensure the continuity of
+	// the trace. Always generate a new span ID so other components can rely
+	// on a unique span ID, even if the Span is non-recording.
+	var tid trace.TraceID
+	var sid trace.SpanID
+	if !psc.TraceID().IsValid() {
+		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+	} else {
+		tid = psc.TraceID()
+		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+	}
+
+	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+		ParentContext: ctx,
+		TraceID:       tid,
+		Name:          name,
+		Kind:          config.SpanKind(),
+		Attributes:    config.Attributes(),
+		Links:         config.Links(),
+	})
+
+	scc := trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceState: samplingResult.Tracestate,
+	}
+	if isSampled(samplingResult) {
+		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+	} else {
+		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+	}
+	sc := trace.NewSpanContext(scc)
+
+	if !isRecording(samplingResult) {
+		return tr.newNonRecordingSpan(sc)
+	}
+	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+	startTime := config.Timestamp()
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	s := &recordingSpan{
+		// Do not pre-allocate the attributes slice here! Doing so will
+		// allocate memory that is likely never going to be used, or if used,
+		// will be over-sized. The default Go compiler has been tested to
+		// dynamically allocate needed space very well. Benchmarking has shown
+		// it to be more performant than what we can predetermine here,
+		// especially for the common use case of few to no added
+		// attributes.
+
+		parent:      psc,
+		spanContext: sc,
+		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
+		name:        name,
+		startTime:   startTime,
+		events:      newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
+		links:       newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
+		tracer:      tr,
+	}
+
+	for _, l := range config.Links() {
+		s.addLink(l)
+	}
+
+	s.SetAttributes(sr.Attributes...)
+	s.SetAttributes(config.Attributes()...)
+
+	return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+	return nonRecordingSpan{tracer: tr, sc: sc}
+}
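
An illustrative sketch of the Start path implemented above: the parent is discovered in the context, so the child span shares its trace ID and bumps its child span count automatically.

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func parentAndChild(ctx context.Context, tp *sdktrace.TracerProvider) {
	tr := tp.Tracer("example")

	ctx, parent := tr.Start(ctx, "parent")
	defer parent.End()

	// Start finds "parent" in ctx, increments its child span count, and
	// reuses its trace ID when creating "child".
	_, child := tr.Start(ctx, "child")
	defer child.End()
}
```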
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
new file mode 100644
index 000000000..b580eedef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go
@@ -0,0 +1,336 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal"
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// SemanticConventions are the semantic convention values defined for a
+// version of the OpenTelemetry specification.
+type SemanticConventions struct {
+	EnduserIDKey                attribute.Key
+	HTTPClientIPKey             attribute.Key
+	HTTPFlavorKey               attribute.Key
+	HTTPHostKey                 attribute.Key
+	HTTPMethodKey               attribute.Key
+	HTTPRequestContentLengthKey attribute.Key
+	HTTPRouteKey                attribute.Key
+	HTTPSchemeHTTP              attribute.KeyValue
+	HTTPSchemeHTTPS             attribute.KeyValue
+	HTTPServerNameKey           attribute.Key
+	HTTPStatusCodeKey           attribute.Key
+	HTTPTargetKey               attribute.Key
+	HTTPURLKey                  attribute.Key
+	HTTPUserAgentKey            attribute.Key
+	NetHostIPKey                attribute.Key
+	NetHostNameKey              attribute.Key
+	NetHostPortKey              attribute.Key
+	NetPeerIPKey                attribute.Key
+	NetPeerNameKey              attribute.Key
+	NetPeerPortKey              attribute.Key
+	NetTransportIP              attribute.KeyValue
+	NetTransportOther           attribute.KeyValue
+	NetTransportTCP             attribute.KeyValue
+	NetTransportUDP             attribute.KeyValue
+	NetTransportUnix            attribute.KeyValue
+}
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span. The network parameter is a string that the net.Dial function
+// from the standard library can understand.
+func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{}
+
+	switch network {
+	case "tcp", "tcp4", "tcp6":
+		attrs = append(attrs, sc.NetTransportTCP)
+	case "udp", "udp4", "udp6":
+		attrs = append(attrs, sc.NetTransportUDP)
+	case "ip", "ip4", "ip6":
+		attrs = append(attrs, sc.NetTransportIP)
+	case "unix", "unixgram", "unixpacket":
+		attrs = append(attrs, sc.NetTransportUnix)
+	default:
+		attrs = append(attrs, sc.NetTransportOther)
+	}
+
+	peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr)
+	if peerIP != "" {
+		attrs = append(attrs, sc.NetPeerIPKey.String(peerIP))
+	}
+	if peerName != "" {
+		attrs = append(attrs, sc.NetPeerNameKey.String(peerName))
+	}
+	if peerPort != 0 {
+		attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort))
+	}
+
+	hostIP, hostName, hostPort := "", "", 0
+	for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} {
+		hostIP, hostName, hostPort = hostIPNamePort(someHost)
+		if hostIP != "" || hostName != "" || hostPort != 0 {
+			break
+		}
+	}
+	if hostIP != "" {
+		attrs = append(attrs, sc.NetHostIPKey.String(hostIP))
+	}
+	if hostName != "" {
+		attrs = append(attrs, sc.NetHostNameKey.String(hostName))
+	}
+	if hostPort != 0 {
+		attrs = append(attrs, sc.NetHostPortKey.Int(hostPort))
+	}
+
+	return attrs
+}
+
+// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort.
+// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized
+// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the
+// host portion will instead be returned in `name`.
+func hostIPNamePort(hostWithPort string) (ip string, name string, port int) {
+	var (
+		hostPart, portPart string
+		parsedPort         uint64
+		err                error
+	)
+	if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil {
+		hostPart, portPart = hostWithPort, ""
+	}
+	if parsedIP := net.ParseIP(hostPart); parsedIP != nil {
+		ip = parsedIP.String()
+	} else {
+		name = hostPart
+	}
+	if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil {
+		port = int(parsedPort)
+	}
+	return
+}
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	if username, _, ok := request.BasicAuth(); ok {
+		return []attribute.KeyValue{sc.EnduserIDKey.String(username)}
+	}
+	return nil
+}
+
+// HTTPClientAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the client side.
+func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{}
+
+	// remove any username/password info that may be in the URL
+	// before adding it to the attributes
+	userinfo := request.URL.User
+	request.URL.User = nil
+
+	attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String()))
+
+	// restore any username/password info that was removed
+	request.URL.User = userinfo
+
+	return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
+}
+
+func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{}
+	if ua := request.UserAgent(); ua != "" {
+		attrs = append(attrs, sc.HTTPUserAgentKey.String(ua))
+	}
+	if request.ContentLength > 0 {
+		attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength))
+	}
+
+	return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...)
+}
+
+func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	// as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality
+	attrs := []attribute.KeyValue{}
+
+	if request.TLS != nil {
+		attrs = append(attrs, sc.HTTPSchemeHTTPS)
+	} else {
+		attrs = append(attrs, sc.HTTPSchemeHTTP)
+	}
+
+	if request.Host != "" {
+		attrs = append(attrs, sc.HTTPHostKey.String(request.Host))
+	} else if request.URL != nil && request.URL.Host != "" {
+		attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host))
+	}
+
+	flavor := ""
+	if request.ProtoMajor == 1 {
+		flavor = fmt.Sprintf("1.%d", request.ProtoMinor)
+	} else if request.ProtoMajor == 2 {
+		flavor = "2"
+	}
+	if flavor != "" {
+		attrs = append(attrs, sc.HTTPFlavorKey.String(flavor))
+	}
+
+	if request.Method != "" {
+		attrs = append(attrs, sc.HTTPMethodKey.String(request.Method))
+	} else {
+		attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet))
+	}
+
+	return attrs
+}
+
+// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
+// to be used with server-side HTTP metrics.
+func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{}
+	if serverName != "" {
+		attrs = append(attrs, sc.HTTPServerNameKey.String(serverName))
+	}
+	return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...)
+}
+
+// HTTPServerAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the server side. Currently, only basic authentication is
+// supported.
+func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{
+		sc.HTTPTargetKey.String(request.RequestURI),
+	}
+
+	if serverName != "" {
+		attrs = append(attrs, sc.HTTPServerNameKey.String(serverName))
+	}
+	if route != "" {
+		attrs = append(attrs, sc.HTTPRouteKey.String(route))
+	}
+	if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 {
+		if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 {
+			attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0]))
+		}
+	}
+
+	return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
+}
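+
+// A minimal server-side sketch (assumptions: sc is an initialized
+// *SemanticConventions, tracer is a configured trace.Tracer, and the
+// server and route names are placeholders):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		attrs := sc.HTTPServerAttributesFromHTTPRequest("api.example.com", "/users/:id", r)
+//		_, span := tracer.Start(r.Context(), "HTTP "+r.Method, trace.WithAttributes(attrs...))
+//		defer span.End()
+//		// ... serve the request ...
+//	}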
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+	attrs := []attribute.KeyValue{
+		sc.HTTPStatusCodeKey.Int(code),
+	}
+	return attrs
+}
+
+type codeRange struct {
+	fromInclusive int
+	toInclusive   int
+}
+
+func (r codeRange) contains(code int) bool {
+	return r.fromInclusive <= code && code <= r.toInclusive
+}
+
+var validRangesPerCategory = map[int][]codeRange{
+	1: {
+		{http.StatusContinue, http.StatusEarlyHints},
+	},
+	2: {
+		{http.StatusOK, http.StatusAlreadyReported},
+		{http.StatusIMUsed, http.StatusIMUsed},
+	},
+	3: {
+		{http.StatusMultipleChoices, http.StatusUseProxy},
+		{http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
+	},
+	4: {
+		{http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
+		{http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
+		{http.StatusPreconditionRequired, http.StatusTooManyRequests},
+		{http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
+		{http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
+	},
+	5: {
+		{http.StatusInternalServerError, http.StatusLoopDetected},
+		{http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
+	},
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+	spanCode, valid := validateHTTPStatusCode(code)
+	if !valid {
+		return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	return spanCode, ""
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+	spanCode, valid := validateHTTPStatusCode(code)
+	if !valid {
+		return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	category := code / 100
+	if spanKind == trace.SpanKindServer && category == 4 {
+		return codes.Unset, ""
+	}
+	return spanCode, ""
+}
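+
+// Worked examples of the mapping; the results follow directly from the
+// ranges defined below:
+//
+//	SpanStatusFromHTTPStatusCodeAndSpanKind(200, trace.SpanKindServer) // codes.Unset, ""
+//	SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindServer) // codes.Unset, ""
+//	SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindClient) // codes.Error, ""
+//	SpanStatusFromHTTPStatusCodeAndSpanKind(500, trace.SpanKindServer) // codes.Error, ""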
+
+// validateHTTPStatusCode validates the HTTP status code and returns
+// corresponding span status code. If the `code` is not a valid HTTP status
+// code, returns span status Error and false.
+func validateHTTPStatusCode(code int) (codes.Code, bool) {
+	category := code / 100
+	ranges, ok := validRangesPerCategory[category]
+	if !ok {
+		return codes.Error, false
+	}
+	ok = false
+	for _, crange := range ranges {
+		ok = crange.contains(code)
+		if ok {
+			break
+		}
+	}
+	if !ok {
+		return codes.Error, false
+	}
+	if category > 0 && category < 4 {
+		return codes.Unset, true
+	}
+	return codes.Error, true
+}
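+
+// Illustrative results of validateHTTPStatusCode:
+//
+//	validateHTTPStatusCode(302) // codes.Unset, true
+//	validateHTTPStatusCode(499) // codes.Error, false (not a registered code)
+//	validateHTTPStatusCode(600) // codes.Error, false (no such category)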
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
new file mode 100644
index 000000000..12d6b520f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go
@@ -0,0 +1,404 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+)
+
+// HTTPConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type HTTPConv struct {
+	NetConv *NetConv
+
+	EnduserIDKey                 attribute.Key
+	HTTPClientIPKey              attribute.Key
+	HTTPFlavorKey                attribute.Key
+	HTTPMethodKey                attribute.Key
+	HTTPRequestContentLengthKey  attribute.Key
+	HTTPResponseContentLengthKey attribute.Key
+	HTTPRouteKey                 attribute.Key
+	HTTPSchemeHTTP               attribute.KeyValue
+	HTTPSchemeHTTPS              attribute.KeyValue
+	HTTPStatusCodeKey            attribute.Key
+	HTTPTargetKey                attribute.Key
+	HTTPURLKey                   attribute.Key
+	HTTPUserAgentKey             attribute.Key
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. If needed, a complete set of attributes can be generated using
+// the request contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+	var n int
+	if resp.StatusCode > 0 {
+		n++
+	}
+	if resp.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+	}
+	if resp.ContentLength > 0 {
+		attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+	}
+	return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+	n := 4 // URL, peer name, proto, and method.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	if req.ContentLength > 0 {
+		n++
+	}
+	userID, _, hasUserID := req.BasicAuth()
+	if hasUserID {
+		n++
+	}
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.proto(req.Proto))
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, c.HTTPURLKey.String(u))
+
+	attrs = append(attrs, c.NetConv.PeerName(peer))
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+	}
+
+	if l := req.ContentLength; l > 0 {
+		attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+	}
+
+	if hasUserID {
+		attrs = append(attrs, c.EnduserIDKey.String(userID))
+	}
+
+	return attrs
+}
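+
+// For a hypothetical GET of "https://example.com:8443/q" over HTTP/1.1,
+// this yields http.method="GET", http.flavor="1.1",
+// http.url="https://example.com:8443/q" (any URL userinfo is stripped),
+// net.peer.name="example.com", and net.peer.port=8443, plus the optional
+// user-agent, content-length, and enduser attributes when present.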
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	// TODO: This currently does not add the specification-required
+	// `http.target` attribute. Its cardinality is too high to be added safely.
+	// An alternative should be added, or this comment removed, when the issue
+	// is addressed by the specification. If it is ultimately decided to keep
+	// omitting the attribute, the HTTPTargetKey field of HTTPConv should be
+	// removed as well.
+
+	n := 4 // Method, scheme, proto, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	peer, peerPort := splitHostPort(req.RemoteAddr)
+	if peer != "" {
+		n++
+		if peerPort > 0 {
+			n++
+		}
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	userID, _, hasUserID := req.BasicAuth()
+	if hasUserID {
+		n++
+	}
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		n++
+	}
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.proto(req.Proto))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", so this will not
+		// be a file path that would imply a unix socket family.
+		attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+		}
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
+	}
+
+	if hasUserID {
+		attrs = append(attrs, c.EnduserIDKey.String(userID))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+	}
+
+	return attrs
+}
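+
+// A minimal usage sketch (assumptions: c is an initialized *HTTPConv,
+// tracer is a configured trace.Tracer, and the server name is a
+// placeholder):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		_, span := tracer.Start(r.Context(), "HTTP "+r.Method,
+//			trace.WithAttributes(c.ServerRequest("api.example.com", r)...))
+//		defer span.End()
+//	}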
+
+func (c *HTTPConv) method(method string) attribute.KeyValue {
+	if method == "" {
+		return c.HTTPMethodKey.String(http.MethodGet)
+	}
+	return c.HTTPMethodKey.String(method)
+}
+
+func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+	if https {
+		return c.HTTPSchemeHTTPS
+	}
+	return c.HTTPSchemeHTTP
+}
+
+func (c *HTTPConv) proto(proto string) attribute.KeyValue {
+	switch proto {
+	case "HTTP/1.0":
+		return c.HTTPFlavorKey.String("1.0")
+	case "HTTP/1.1":
+		return c.HTTPFlavorKey.String("1.1")
+	case "HTTP/2":
+		return c.HTTPFlavorKey.String("2.0")
+	case "HTTP/3":
+		return c.HTTPFlavorKey.String("3.0")
+	default:
+		return c.HTTPFlavorKey.String(proto)
+	}
+}
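+
+// For example, proto("HTTP/1.1") yields http.flavor="1.1" and
+// proto("HTTP/2") yields http.flavor="2.0"; unrecognized values are
+// passed through verbatim.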
+
+func serverClientIP(xForwardedFor string) string {
+	if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+		xForwardedFor = xForwardedFor[:idx]
+	}
+	return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+	if https {
+		if port > 0 && port != 443 {
+			return port
+		}
+	} else {
+		if port > 0 && port != 80 {
+			return port
+		}
+	}
+	return -1
+}
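+
+// For example, requiredHTTPPort(true, 443) and requiredHTTPPort(false, 80)
+// both return -1 (scheme-default ports are omitted), while
+// requiredHTTPPort(true, 8443) returns 8443.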
+
+// firstHostPort returns the request host and port from the first non-empty source.
+func firstHostPort(source ...string) (host string, port int) {
+	for _, hostport := range source {
+		host, port = splitHostPort(hostport)
+		if host != "" || port > 0 {
+			break
+		}
+	}
+	return
+}
+
+// RequestHeader returns the contents of h as OpenTelemetry attributes.
+func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue {
+	return c.header("http.request.header", h)
+}
+
+// ResponseHeader returns the contents of h as OpenTelemetry attributes.
+func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue {
+	return c.header("http.response.header", h)
+}
+
+func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue {
+	key := func(k string) attribute.Key {
+		k = strings.ToLower(k)
+		k = strings.ReplaceAll(k, "-", "_")
+		k = fmt.Sprintf("%s.%s", prefix, k)
+		return attribute.Key(k)
+	}
+
+	attrs := make([]attribute.KeyValue, 0, len(h))
+	for k, v := range h {
+		attrs = append(attrs, key(k).StringSlice(v))
+	}
+	return attrs
+}
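+
+// For example, a header set with h.Set("X-Forwarded-For", "192.0.2.1")
+// is rendered by RequestHeader as the attribute
+// http.request.header.x_forwarded_for=["192.0.2.1"].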
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) {
+	stat, valid := validateHTTPStatusCode(code)
+	if !valid {
+		return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	return stat, ""
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) {
+	stat, valid := validateHTTPStatusCode(code)
+	if !valid {
+		return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+
+	if code/100 == 4 {
+		return codes.Unset, ""
+	}
+	return stat, ""
+}
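+
+// Illustrative results (c is assumed to be an initialized *HTTPConv):
+//
+//	c.ClientStatus(404) // codes.Error, "" (4xx is an error for clients)
+//	c.ServerStatus(404) // codes.Unset, "" (4xx is not a server error)
+//	c.ServerStatus(503) // codes.Error, ""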
+
+type codeRange struct {
+	fromInclusive int
+	toInclusive   int
+}
+
+func (r codeRange) contains(code int) bool {
+	return r.fromInclusive <= code && code <= r.toInclusive
+}
+
+var validRangesPerCategory = map[int][]codeRange{
+	1: {
+		{http.StatusContinue, http.StatusEarlyHints},
+	},
+	2: {
+		{http.StatusOK, http.StatusAlreadyReported},
+		{http.StatusIMUsed, http.StatusIMUsed},
+	},
+	3: {
+		{http.StatusMultipleChoices, http.StatusUseProxy},
+		{http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
+	},
+	4: {
+		{http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
+		{http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
+		{http.StatusPreconditionRequired, http.StatusTooManyRequests},
+		{http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
+		{http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
+	},
+	5: {
+		{http.StatusInternalServerError, http.StatusLoopDetected},
+		{http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
+	},
+}
+
+// validateHTTPStatusCode validates the HTTP status code and returns
+// corresponding span status code. If the `code` is not a valid HTTP status
+// code, returns span status Error and false.
+func validateHTTPStatusCode(code int) (codes.Code, bool) {
+	category := code / 100
+	ranges, ok := validRangesPerCategory[category]
+	if !ok {
+		return codes.Error, false
+	}
+	ok = false
+	for _, crange := range ranges {
+		ok = crange.contains(code)
+		if ok {
+			break
+		}
+	}
+	if !ok {
+		return codes.Error, false
+	}
+	if category > 0 && category < 4 {
+		return codes.Unset, true
+	}
+	return codes.Error, true
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
new file mode 100644
index 000000000..4a711133a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go
@@ -0,0 +1,324 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
+
+import (
+	"net"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// NetConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type NetConv struct {
+	NetHostNameKey     attribute.Key
+	NetHostPortKey     attribute.Key
+	NetPeerNameKey     attribute.Key
+	NetPeerPortKey     attribute.Key
+	NetSockFamilyKey   attribute.Key
+	NetSockPeerAddrKey attribute.Key
+	NetSockPeerPortKey attribute.Key
+	NetSockHostAddrKey attribute.Key
+	NetSockHostPortKey attribute.Key
+	NetTransportOther  attribute.KeyValue
+	NetTransportTCP    attribute.KeyValue
+	NetTransportUDP    attribute.KeyValue
+	NetTransportInProc attribute.KeyValue
+}
+
+func (c *NetConv) Transport(network string) attribute.KeyValue {
+	switch network {
+	case "tcp", "tcp4", "tcp6":
+		return c.NetTransportTCP
+	case "udp", "udp4", "udp6":
+		return c.NetTransportUDP
+	case "unix", "unixgram", "unixpacket":
+		return c.NetTransportInProc
+	default:
+		// "ip:*", "ip4:*", and "ip6:*" all are considered other.
+		return c.NetTransportOther
+	}
+}
+
+// Host returns attributes for a network host address.
+func (c *NetConv) Host(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.HostName(h))
+	if p > 0 {
+		attrs = append(attrs, c.HostPort(p))
+	}
+	return attrs
+}
+
+// Server returns attributes for a network listener listening at address. See
+// net.Listen for information about acceptable address values; address should
+// be the same as the one used to create ln. If ln is nil, only the network
+// host attributes that describe address are returned. Otherwise, socket-level
+// information about ln is also included.
+func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue {
+	if ln == nil {
+		return c.Host(address)
+	}
+
+	lAddr := ln.Addr()
+	if lAddr == nil {
+		return c.Host(address)
+	}
+
+	hostName, hostPort := splitHostPort(address)
+	sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
+	network := lAddr.Network()
+	sockFamily := family(network, sockHostAddr)
+
+	n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
+	n += positiveInt(hostPort, sockHostPort)
+	attr := make([]attribute.KeyValue, 0, n)
+	if hostName != "" {
+		attr = append(attr, c.HostName(hostName))
+		if hostPort > 0 {
+			// Only if net.host.name is set should net.host.port be.
+			attr = append(attr, c.HostPort(hostPort))
+		}
+	}
+	if network != "" {
+		attr = append(attr, c.Transport(network))
+	}
+	if sockFamily != "" {
+		attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+	}
+	if sockHostAddr != "" {
+		attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+		if sockHostPort > 0 {
+			// Only if net.sock.host.addr is set should net.sock.host.port be.
+			attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
+		}
+	}
+	return attr
+}
+
+func (c *NetConv) HostName(name string) attribute.KeyValue {
+	return c.NetHostNameKey.String(name)
+}
+
+func (c *NetConv) HostPort(port int) attribute.KeyValue {
+	return c.NetHostPortKey.Int(port)
+}
+
+// Client returns attributes for a client network connection to address. See
+// net.Dial for information about acceptable address values; address should be
+// the same as the one used to create conn. If conn is nil, only the network
+// peer attributes that describe address are returned. Otherwise, socket-level
+// information about conn is also included.
+func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue {
+	if conn == nil {
+		return c.Peer(address)
+	}
+
+	lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
+
+	var network string
+	switch {
+	case lAddr != nil:
+		network = lAddr.Network()
+	case rAddr != nil:
+		network = rAddr.Network()
+	default:
+		return c.Peer(address)
+	}
+
+	peerName, peerPort := splitHostPort(address)
+	var (
+		sockFamily   string
+		sockPeerAddr string
+		sockPeerPort int
+		sockHostAddr string
+		sockHostPort int
+	)
+
+	if lAddr != nil {
+		sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
+	}
+
+	if rAddr != nil {
+		sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
+	}
+
+	switch {
+	case sockHostAddr != "":
+		sockFamily = family(network, sockHostAddr)
+	case sockPeerAddr != "":
+		sockFamily = family(network, sockPeerAddr)
+	}
+
+	n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
+	n += positiveInt(peerPort, sockPeerPort, sockHostPort)
+	attr := make([]attribute.KeyValue, 0, n)
+	if peerName != "" {
+		attr = append(attr, c.PeerName(peerName))
+		if peerPort > 0 {
+			// Only if net.peer.name is set should net.peer.port be.
+			attr = append(attr, c.PeerPort(peerPort))
+		}
+	}
+	if network != "" {
+		attr = append(attr, c.Transport(network))
+	}
+	if sockFamily != "" {
+		attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+	}
+	if sockPeerAddr != "" {
+		attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
+		if sockPeerPort > 0 {
+			// Only if net.sock.peer.addr is set should net.sock.peer.port be.
+			attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
+		}
+	}
+	if sockHostAddr != "" {
+		attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+		if sockHostPort > 0 {
+			// Only if net.sock.host.addr is set should net.sock.host.port be.
+			attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
+		}
+	}
+	return attr
+}
+
+func family(network, address string) string {
+	switch network {
+	case "unix", "unixgram", "unixpacket":
+		return "unix"
+	default:
+		if ip := net.ParseIP(address); ip != nil {
+			if ip.To4() == nil {
+				return "inet6"
+			}
+			return "inet"
+		}
+	}
+	return ""
+}
+
+func nonZeroStr(strs ...string) int {
+	var n int
+	for _, str := range strs {
+		if str != "" {
+			n++
+		}
+	}
+	return n
+}
+
+func positiveInt(ints ...int) int {
+	var n int
+	for _, i := range ints {
+		if i > 0 {
+			n++
+		}
+	}
+	return n
+}
+
+// Peer returns attributes for a network peer address.
+func (c *NetConv) Peer(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.PeerName(h))
+	if p > 0 {
+		attrs = append(attrs, c.PeerPort(p))
+	}
+	return attrs
+}
+
+func (c *NetConv) PeerName(name string) attribute.KeyValue {
+	return c.NetPeerNameKey.String(name)
+}
+
+func (c *NetConv) PeerPort(port int) attribute.KeyValue {
+	return c.NetPeerPortKey.Int(port)
+}
+
+func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue {
+	return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *NetConv) SockPeerPort(port int) attribute.KeyValue {
+	return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+	port = -1
+
+	if strings.HasPrefix(hostport, "[") {
+		addrEnd := strings.LastIndex(hostport, "]")
+		if addrEnd < 0 {
+			// Invalid hostport.
+			return
+		}
+		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+			host = hostport[1:addrEnd]
+			return
+		}
+	} else {
+		if i := strings.LastIndex(hostport, ":"); i < 0 {
+			host = hostport
+			return
+		}
+	}
+
+	host, pStr, err := net.SplitHostPort(hostport)
+	if err != nil {
+		return
+	}
+
+	p, err := strconv.ParseUint(pStr, 10, 16)
+	if err != nil {
+		return
+	}
+	return host, int(p)
+}
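+
+// Examples of the accepted forms:
+//
+//	splitHostPort("example.com:8080")  // "example.com", 8080
+//	splitHostPort("[2001:db8::1]:443") // "2001:db8::1", 443
+//	splitHostPort("example.com")       // "example.com", -1
+//	splitHostPort(":8080")             // "", 8080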
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
new file mode 100644
index 000000000..0c5fbdcee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed, standardized naming
+// patterns for the attributes, resources, and events that OpenTelemetry
+// records. This package represents the conventions as of the v1.10.0
+// version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
new file mode 100644
index 000000000..f4fc8c7aa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
new file mode 100644
index 000000000..6271b1926
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/semconv/internal"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// HTTP scheme attributes.
+var (
+	HTTPSchemeHTTP  = HTTPSchemeKey.String("http")
+	HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
+
+var sc = &internal.SemanticConventions{
+	EnduserIDKey:                EnduserIDKey,
+	HTTPClientIPKey:             HTTPClientIPKey,
+	HTTPFlavorKey:               HTTPFlavorKey,
+	HTTPHostKey:                 HTTPHostKey,
+	HTTPMethodKey:               HTTPMethodKey,
+	HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
+	HTTPRouteKey:                HTTPRouteKey,
+	HTTPSchemeHTTP:              HTTPSchemeHTTP,
+	HTTPSchemeHTTPS:             HTTPSchemeHTTPS,
+	HTTPServerNameKey:           HTTPServerNameKey,
+	HTTPStatusCodeKey:           HTTPStatusCodeKey,
+	HTTPTargetKey:               HTTPTargetKey,
+	HTTPURLKey:                  HTTPURLKey,
+	HTTPUserAgentKey:            HTTPUserAgentKey,
+	NetHostIPKey:                NetHostIPKey,
+	NetHostNameKey:              NetHostNameKey,
+	NetHostPortKey:              NetHostPortKey,
+	NetPeerIPKey:                NetPeerIPKey,
+	NetPeerNameKey:              NetPeerNameKey,
+	NetPeerPortKey:              NetPeerPortKey,
+	NetTransportIP:              NetTransportIP,
+	NetTransportOther:           NetTransportOther,
+	NetTransportTCP:             NetTransportTCP,
+	NetTransportUDP:             NetTransportUDP,
+	NetTransportUnix:            NetTransportUnix,
+}
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span. The network parameter is a string that the net.Dial function
+// from the standard library can understand.
+func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+	return sc.NetAttributesFromHTTPRequest(network, request)
+}
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	return sc.EndUserAttributesFromHTTPRequest(request)
+}
+
+// HTTPClientAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the client side.
+func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	return sc.HTTPClientAttributesFromHTTPRequest(request)
+}
+
+// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
+// to be used with server-side HTTP metrics.
+func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
+	return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
+}
+
+// HTTPServerAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the server side. Currently, only basic authentication is
+// supported.
+func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+	return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
+}
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+	return sc.HTTPAttributesFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+	return internal.SpanStatusFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+	return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
+}
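+
+// A minimal sketch of using these package-level helpers to annotate a
+// server span (assumptions: tracer is a configured trace.Tracer and
+// status holds the observed response status code):
+//
+//	attrs := HTTPServerAttributesFromHTTPRequest("api.example.com", "/users/:id", req)
+//	_, span := tracer.Start(req.Context(), "HTTP "+req.Method, trace.WithAttributes(attrs...))
+//	code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(status, trace.SpanKindServer)
+//	span.SetStatus(code, msg)
+//	span.End()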
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
new file mode 100644
index 000000000..593017eea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go
@@ -0,0 +1,981 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// Name of the cloud provider.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+	// The cloud account ID the resource is assigned to.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+	// The geographical region the resource is running.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for example
+	// [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-
+	// detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-
+	// infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-
+	// us/global-infrastructure/geographies/), [Google Cloud
+	// regions](https://cloud.google.com/about/locations), or [Tencent Cloud
+	// regions](https://intl.cloud.tencent.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+	// Cloud regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the resource
+	// is running.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+	// The cloud platform in use.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
+	// amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-
+	// west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+	// The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
+	// perguide/clusters.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+	// The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
+	// aunch_types.html) for an ECS task.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+	// The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
+	// t/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-
+	// west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+	// The task definition family this task definition is a member of.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+	// The revision for this task definition.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// The ARN of an EKS cluster.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// Resources specific to Amazon Web Services.
+const (
+	// The name(s) of the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like multi-container
+	// applications, where a single application has sidecar containers, and each write
+	// to their own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+	// The Amazon Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+	// access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+	// The name(s) of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+	// The ARN(s) of the AWS log stream(s).
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
+	// stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+	// access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
+	// several log streams, so these ARNs necessarily identify both a log group and a
+	// log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// A container instance.
+const (
+	// Container name used by container runtime.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+	// Container ID. Usually a UUID, as for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-
+	// identification). The UUID might be abbreviated.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+	// The container runtime managing this container.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+	// Name of the image the container was built on.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+	// Container image tag.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.1'
+	ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// The software deployment.
+const (
+	// Name of the [deployment
+	// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'staging', 'production'
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// The device on which the process represented by this resource is running.
+const (
+	// A unique identifier representing the device
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values outlined
+	// below. This value is not an advertising identifier and MUST NOT be used as
+	// such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
+	// entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
+	// tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
+	// Firebase Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on best
+	// practices and exact implementation details. Caution should be taken when
+	// storing personal data or anything which can identify a user. GDPR and data
+	// protection laws may apply, ensure you do your own due diligence.
+	DeviceIDKey = attribute.Key("device.id")
+	// The model identifier for the device
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine readable version of the
+	// model identifier rather than the market or consumer-friendly name of the
+	// device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+	// The marketing name for the device model
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human readable version of the
+	// device model rather than a machine readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+	// The name of the device manufacturer
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// A serverless instance.
+const (
+	// The name of the single function that this runtime instance executes.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'my-function'
+	// Note: This is the name of the function as configured/deployed on the FaaS
+	// platform and is usually different from the name of the callback function (which
+	// may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
+	// general.md#source-code-attributes) span attributes).
+	FaaSNameKey = attribute.Key("faas.name")
+	// The unique ID of the single function that this runtime instance executes.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: Depending on the cloud provider, use:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
+	// namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+	// aliases.html) with the resolved function version, as the same runtime instance
+	// may be invokable with multiple
+	// different aliases.
+	// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
+	// resource-names)
+	// * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
+	// us/rest/api/resources/resources/get-by-id).
+	//
+	// On some providers, it may not be possible to determine the full ID at startup,
+	// which is why this field cannot be made required. For example, on AWS the
+	// account ID
+	// part of the ARN is not available without calling another AWS API
+	// which may be deemed too slow for a short-running lambda function.
+	// As an alternative, consider setting `faas.id` as a span attribute instead.
+	FaaSIDKey = attribute.Key("faas.id")
+	// The immutable version of the function being executed.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+	// versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run:** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-
+	// var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+	// The execution environment ID as a string, that will be potentially reused for
+	// other invocations to the same function/function version.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+	// The amount of memory available to the serverless function in MiB.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 128
+	// Note: It's recommended to set this attribute since e.g. too little memory can
+	// easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+	// the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+	// information.
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// A host is defined as a general computing instance.
+const (
+	// Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
+	// provider.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostIDKey = attribute.Key("host.id")
+	// Name of the host. On Unix systems, it may contain what the hostname command
+	// returns, or the fully qualified hostname, or another name specified by the
+	// user.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+	// Type of host. For Cloud, this must be the machine type.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+	// The CPU architecture the host system is running on.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+	// Name of the VM image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+	// VM image ID. For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+	// The version string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// A Kubernetes Cluster.
+const (
+	// The name of the cluster.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// A Kubernetes Node object.
+const (
+	// The name of the Node.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+	// The UID of the Node.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// A Kubernetes Namespace.
+const (
+	// The name of the namespace that the pod is running in.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// A Kubernetes Pod object.
+const (
+	// The UID of the Pod.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+	// The name of the Pod.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// The name of the Container from Pod specification, must be unique within a Pod.
+	// Container runtime usually uses different globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+	// Number of times the container was restarted. This attribute can be used to
+	// identify a particular container (running or stopped) within a container spec.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// A Kubernetes ReplicaSet object.
+const (
+	// The UID of the ReplicaSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+	// The name of the ReplicaSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// A Kubernetes Deployment object.
+const (
+	// The UID of the Deployment.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+	// The name of the Deployment.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// A Kubernetes StatefulSet object.
+const (
+	// The UID of the StatefulSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+	// The name of the StatefulSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// A Kubernetes DaemonSet object.
+const (
+	// The UID of the DaemonSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+	// The name of the DaemonSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// A Kubernetes Job object.
+const (
+	// The UID of the Job.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+	// The name of the Job.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// A Kubernetes CronJob object.
+const (
+	// The UID of the CronJob.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+	// The name of the CronJob.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
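+
+// Illustrative usage sketch, not part of the generated conventions: a pod
+// running in a cluster could be described by combining several of the k8s.*
+// resource keys above (values here are hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		K8SClusterNameKey.String("opentelemetry-cluster"),
+//		K8SNamespaceNameKey.String("default"),
+//		K8SPodNameKey.String("opentelemetry-pod-autoconf"),
+//		K8SContainerNameKey.String("redis"),
+//	}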
+
+// The operating system (OS) on which the process represented by this resource is running.
+const (
+	// The operating system type.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	OSTypeKey = attribute.Key("os.type")
+	// Human-readable (not intended to be parsed) OS version information, as
+	// reported by e.g. the `ver` or `lsb_release -a` commands.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+	// Human readable operating system name.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+	// The version string of the operating system as defined in [Version
+	// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
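+
+// Illustrative usage sketch, not part of the generated conventions: the enum
+// variants above are ready-made attribute.KeyValue values, so a Linux host
+// resource could be described as (values here are hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSNameKey.String("Ubuntu"),
+//		OSVersionKey.String("18.04.1"),
+//	}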
+
+// An operating system process.
+const (
+	// Process identifier (PID).
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+	// The name of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+	// The full path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+	// The command used to launch the process (i.e. the command name). On Linux based
+	// systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
+	// can be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+	// The full command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
+	// set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+	// All the command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unix-like systems
+	// supporting procfs), can be set according to the list of null-delimited strings
+	// extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
+	// the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// Required: See below
+	// Stability: stable
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+	// The username of the user that owns the process.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+)
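+
+// Illustrative usage sketch, not part of the generated conventions: the
+// process attributes can be populated from the standard library (the helper
+// name is hypothetical):
+//
+//	import "os"
+//
+//	func processAttrs() []attribute.KeyValue {
+//		return []attribute.KeyValue{
+//			ProcessPIDKey.Int(os.Getpid()),
+//			ProcessCommandArgsKey.StringSlice(os.Args),
+//		}
+//	}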
+
+// The single (language) runtime instance which is monitored.
+const (
+	// The name of the runtime of this process. For compiled native binaries, this
+	// SHOULD be the name of the compiler.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+	// The version of the runtime of this process, as returned by the runtime without
+	// modification.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+	// An additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// A service instance.
+const (
+	// Logical name of the service.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled services. If
+	// the value was not specified, SDKs MUST fallback to `unknown_service:`
+	// concatenated with [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available, the
+	// value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+	// A namespace for `service.name`.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group of
+	// services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name` is
+	// expected to be unique for all services that have no explicit namespace defined
+	// (so the empty/unspecified namespace is simply one more valid namespace). A
+	// zero-length namespace string is assumed equal to an unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+	// The string ID of the service instance.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be globally
+	// unique). The ID helps to distinguish instances of the same service that exist
+	// at the same time (e.g. instances of a horizontally scaled service). It is
+	// preferable for the ID to be persistent and stay the same for the lifetime of
+	// the service instance, however it is acceptable that the ID is ephemeral and
+	// changes during important lifetime events for the service (e.g. service
+	// restarts). If the service has no inherent unique ID that can be used as the
+	// value of this attribute it is recommended to generate a random Version 1 or
+	// Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+	// The version string of the service API or implementation.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '2.0.0'
+	ServiceVersionKey = attribute.Key("service.version")
+)
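+
+// Illustrative usage sketch, not part of the generated conventions: the
+// identity of one instance of a horizontally scaled service could be
+// expressed as (values here are hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		ServiceNameKey.String("shoppingcart"),
+//		ServiceNamespaceKey.String("Shop"),
+//		ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
+//		ServiceVersionKey.String("2.0.0"),
+//	}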
+
+// The telemetry SDK used to capture data recorded by the instrumentation libraries.
+const (
+	// The name of the telemetry SDK as defined above.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+	// The language of the telemetry SDK.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+	// The version string of the telemetry SDK.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+	// The version string of the auto instrumentation agent, if used.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
+const (
+	// The name of the web engine.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+	// The version of the web engine.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+	// Additional description of the web engine (e.g. detailed version and edition
+	// information).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
new file mode 100644
index 000000000..74f2e1d50
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.10.0"
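+
+// Illustrative usage sketch, not part of this file: SchemaURL is what callers
+// pass when building a resource from the attribute keys in this package
+// (assumes the go.opentelemetry.io/otel/sdk/resource package; the service
+// name is hypothetical):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceNameKey.String("gotosocial"),
+//	)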
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
new file mode 100644
index 000000000..faa3d9c86
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go
@@ -0,0 +1,1700 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+	// The full invoked ARN as provided on the `Context` passed to the function
+	// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` response, where applicable).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `faas.id` if an alias is involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
+const (
+	// The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec
+	// .md#id) uniquely identifies the event.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+	// The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m
+	// d#source-1) identifies the context in which an event happened.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
+	// service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+	// The [version of the CloudEvents specification](https://github.com/cloudevents/s
+	// pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+	// The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp
+	// ec.md#type) contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+	// The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.
+	// md#subject) of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// This document defines semantic conventions for the OpenTracing Shim
+const (
+	// Parent-child Reference type
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// This document defines the attributes used to perform database client calls.
+const (
+	// An identifier for the database management system (DBMS) product being used. See
+	// below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+	// The connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+	// Username for accessing the database.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+	// The fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+	// used to connect.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+	// This attribute is used to report the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target database
+	// (even if the command fails).
+	//
+	// Type: string
+	// Required: Required, if applicable.
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called "schema
+	// name". In case there are multiple layers that could be considered for database
+	// name (e.g. Oracle instance name and schema name), the database name to be used
+	// is the more specific layer (e.g. Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+	// The database statement being executed.
+	//
+	// Type: string
+	// Required: Required if applicable and not explicitly disabled via
+	// instrumentation configuration.
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	// Note: The value may be sanitized to exclude sensitive information.
+	DBStatementKey = attribute.Key("db.statement")
+	// The name of the operation being executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// Required: Required, if `db.statement` is not applicable.
+	// Stability: stable
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to attempt any
+	// client-side parsing of `db.statement` just to get this property, but it should
+	// be set if the operation name is provided by the library being instrumented. If
+	// the SQL statement has an ambiguous operation, or performs more than one
+	// operation, this value may be omitted.
+	DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+)
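+
+// Illustrative usage sketch, not part of the generated conventions: a client
+// span for a PostgreSQL query could carry (values here are hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		DBSystemPostgreSQL,
+//		DBNameKey.String("customers"),
+//		DBStatementKey.String("SELECT * FROM wuser_table"),
+//		DBOperationKey.String("SELECT"),
+//	}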
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// The Microsoft SQL Server [instance name](https://docs.microsoft.com/en-
+	// us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// being connected to. This name is used to determine the port of a named instance.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// Call-level attributes for Cassandra
+const (
+	// The fetch size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+	// The consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-
+	// oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+	// The name of the primary table that the operation is acting upon, including the
+	// keyspace name (if applicable).
+	//
+	// Type: string
+	// Required: Recommended if available.
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the `db.sql.table` attribute but references Cassandra
+	// rather than SQL. It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting upon an
+	// anonymous table, or more than one table, this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+	// Whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+	// The number of times a query was speculatively executed. Not set or `0` if the
+	// query was not executed speculatively.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+	// The ID of the coordinating node for a query.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+	// The data center of the coordinating node for a query.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Call-level attributes for Redis
+const (
+	// The index of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To be used
+	// instead of the generic `db.name` attribute.
+	//
+	// Type: int
+	// Required: Required, if other than the default database (`0`).
+	// Stability: stable
+	// Examples: 0, 1, 15
+	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// Call-level attributes for MongoDB
+const (
+	// The collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// Call-level attributes for SQL databases
+const (
+	// The name of the primary table that the operation is acting upon, including the
+	// database name (if applicable).
+	//
+	// Type: string
+	// Required: Recommended if available.
+	// Stability: stable
+	// Examples: 'public.users', 'customers'
+	// Note: It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting upon an
+	// anonymous table, or more than one table, this value MUST NOT be set.
+	DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// This document defines the attributes used to report a single exception associated with a span.
+const (
+	// The type of the exception (its fully-qualified class name, if applicable). The
+	// dynamic type of the exception should be preferred over the static type in
+	// languages that support it.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+	// The exception message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+	// A stacktrace as a string in the natural representation for the language
+	// runtime. The representation is to be determined and documented by each language
+	// SIG.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+	// SHOULD be set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of a span,
+	// if that span is ended while the exception is still logically "in flight".
+	// This may be actually "in flight" in some languages (e.g. if the exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most languages.
+	//
+	// It is usually not possible to determine at the point where an exception is
+	// thrown whether it will escape the scope of a span. However, it is trivial to
+	// know that an exception will escape, if one checks for an active exception
+	// just before ending the span, as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
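+
+// Illustrative usage sketch, not part of the generated conventions: these
+// keys are typically attached to a span event named "exception" (assumes the
+// go.opentelemetry.io/otel/trace package; values here are hypothetical):
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionTypeKey.String("OSError"),
+//		ExceptionMessageKey.String("Division by zero"),
+//		ExceptionEscapedKey.Bool(true),
+//	))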
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+	// Type of the trigger which caused this function execution.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: For the server/consumer span on the incoming side,
+	// `faas.trigger` MUST be set.
+	//
+	// Clients invoking FaaS instances usually cannot set `faas.trigger`,
+	// since they would typically need to look in the payload to determine
+	// the event type. If clients set it, it should be the same as the
+	// trigger that the corresponding incoming span would have (i.e., this has
+	// nothing to do with the underlying transport used to make the API
+	// call to invoke the lambda, which is often HTTP).
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+	// The execution ID of the current function execution.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+	// The name of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+	// Describes the type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+	// A string containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+	// in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+	// The document name/table subjected to the operation. For example, in Cloud
+	// Storage or S3 this is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+	// A string containing the function invocation time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+	// in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+	// A string containing the schedule period as [Cron Expression](https://docs.oracl
+	// e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+	// A boolean that is true if the serverless function is executed for the first
+	// time (aka cold-start).
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+	// The name of the invoked function.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+	// function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+	// The cloud provider of the invoked function.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
+	// function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+	// The cloud region of the invoked function.
+	//
+	// Type: string
+	// Required: For some cloud providers, like AWS or GCP, the region in which a
+	// function is hosted is essential to uniquely identify the function and also part
+	// of its endpoint. Since it's part of the endpoint being called, the region is
+	// always known to clients. In these cases, `faas.invoked_region` MUST be set
+	// accordingly. If the region is unknown to the client or not required for
+	// identifying the invoked function, setting `faas.invoked_region` is optional.
+	// Stability: stable
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+	// function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// These attributes may be used for any network related operation.
+const (
+	// Transport protocol used. See note below.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	NetTransportKey = attribute.Key("net.transport")
+	// Remote address of the peer (dotted decimal for IPv4 or
+	// [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '127.0.0.1'
+	NetPeerIPKey = attribute.Key("net.peer.ip")
+	// Remote port number.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	NetPeerPortKey = attribute.Key("net.peer.port")
+	// Remote hostname or similar, see note below.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'example.com'
+	NetPeerNameKey = attribute.Key("net.peer.name")
+	// Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '192.168.0.1'
+	NetHostIPKey = attribute.Key("net.host.ip")
+	// Like `net.peer.port` but for the host port.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 35555
+	NetHostPortKey = attribute.Key("net.host.port")
+	// Local hostname or similar, see note below.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'localhost'
+	NetHostNameKey = attribute.Key("net.host.name")
+	// The internet connection type currently being used by the host.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Examples: 'wifi'
+	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+	// This describes more details regarding the `connection.type`. It may be the
+	// type of cell technology connection, but it could also be used to describe
+	// details about a wifi connection.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Examples: 'LTE'
+	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+	// The name of the mobile carrier.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'sprint'
+	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+	// The mobile carrier country code.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '310'
+	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+	// The mobile carrier network code.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '001'
+	NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+	// The ISO 3166-1 alpha-2 2-character country code associated with the mobile
+	// carrier network.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'DE'
+	NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Another IP-based protocol
+	NetTransportIP = NetTransportKey.String("ip")
+	// Unix Domain socket. See below
+	NetTransportUnix = NetTransportKey.String("unix")
+	// Named or anonymous pipe. See note below
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	NetTransportOther = NetTransportKey.String("other")
+)
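+
+// Illustrative usage sketch, not part of the generated conventions: a client
+// span for a TCP connection to a remote host could record (values here are
+// hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		NetTransportTCP,
+//		NetPeerNameKey.String("example.com"),
+//		NetPeerPortKey.Int(443),
+//	}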
+
+var (
+	// wifi
+	NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+	// wired
+	NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+	// cell
+	NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+	// unavailable
+	NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+	// unknown
+	NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+	// GPRS
+	NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// Operations that access some remote service.
+const (
+	// The [`service.name`](../../resource/semantic_conventions/README.md#service) of
+	// the remote service. SHOULD be equal to the actual `service.name` resource
+	// attribute of the remote service if any.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// These attributes may be used for any operation with an authenticated and/or authorized enduser.
+const (
+	// Username or client_id extracted from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
+	// inbound request from outside the system.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+	// Actual/assumed role the client is making the request under extracted from token
+	// or application security context.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+	// Scopes or granted authorities the client currently possesses extracted from
+	// token or application security context. The value would come from the scope
+	// associated with an [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
+	// in a [SAML 2.0 Assertion](http://docs.oasis-
+	// open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// These attributes may be used for any operation to store information about a thread that started a span.
+const (
+	// Current "managed" thread ID (as opposed to OS thread ID).
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+	// Current thread name.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// These attributes allow reporting this unit of code and therefore provide more context about the span.
+const (
+	// The method or function name, or equivalent (usually rightmost part of the code
+	// unit's name).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+	// The "namespace" within which `code.function` is defined. Usually the qualified
+	// class or module name, such that `code.namespace` + some separator +
+	// `code.function` form a unique identifier for the code unit.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+	// The source code file name that identifies the code unit as uniquely as possible
+	// (preferably an absolute file path).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+	// The line number in `code.filepath` best representing the operation. It SHOULD
+	// point within the code unit named in `code.function`.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+)
+
+// This document defines semantic conventions for HTTP client and server Spans.
+const (
+	// HTTP request method.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+	// Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
+	// Usually the fragment is not transmitted over HTTP, but if it is known, it
+	// should be included nevertheless.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	// Note: `http.url` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case the attribute's
+	// value should be `https://www.example.com/`.
+	HTTPURLKey = attribute.Key("http.url")
+	// The full request target as passed in a HTTP request line or equivalent.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '/path/12314/?q=ddds#123'
+	HTTPTargetKey = attribute.Key("http.target")
+	// The value of the [HTTP host
+	// header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
+	// should also be reported, see note.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'www.example.org'
+	// Note: When the header is present but empty the attribute SHOULD be set to the
+	// empty string. Note that this is a valid situation that is expected in certain
+	// cases, according to the aforementioned [section of RFC
+	// 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
+	// set the attribute MUST NOT be set.
+	HTTPHostKey = attribute.Key("http.host")
+	// The URI scheme identifying the used protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+	// [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// Required: If and only if one was received/sent.
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+	// Kind of HTTP protocol used.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
+	// except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+	// Value of the [HTTP User-
+	// Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the
+	// client.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+	// The size of the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as the
+	// [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+	// requests using transport encoding, this should be the compressed size.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+	// The size of the uncompressed request payload body after transport decoding. Not
+	// set if transport encoding not used.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 5493
+	HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
+	// The size of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as the
+	// [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+	// requests using transport encoding, this should be the compressed size.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+	// The size of the uncompressed response payload body after transport decoding.
+	// Not set if transport encoding not used.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 5493
+	HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
+	// The ordinal number of request re-sending attempt.
+	//
+	// Type: int
+	// Required: If and only if a request was retried.
+	// Stability: stable
+	// Examples: 3
+	HTTPRetryCountKey = attribute.Key("http.retry_count")
+)
+
+var (
+	// HTTP 1.0
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP 1.1
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP 2
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// SPDY protocol
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
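+
+// Illustrative usage sketch, not part of the generated conventions: a minimal
+// HTTP client span could set (the request shown is hypothetical):
+//
+//	span.SetAttributes(
+//		HTTPMethodKey.String("GET"),
+//		HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry"),
+//		HTTPStatusCodeKey.Int(200),
+//		HTTPFlavorHTTP11,
+//	)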
+
+// Semantic Convention for HTTP Server
+const (
+	// The primary server name of the matched virtual host. This should be obtained
+	// via configuration. If no such configuration can be obtained, this attribute
+	// MUST NOT be set (`net.host.name` should be used instead).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'example.com'
+	// Note: `http.url` is usually not readily available on the server side but would
+	// have to be assembled in a cumbersome and sometimes lossy process from other
+	// information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
+	// preferred to supply the raw data that is available.
+	HTTPServerNameKey = attribute.Key("http.server_name")
+	// The matched route (path template).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '/users/:userID?'
+	HTTPRouteKey = attribute.Key("http.route")
+	// The IP address of the original client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-
+	// US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.peer.ip`, which would
+	// identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.peer.ip` is available, even if that other
+	// source just confirms the same value as `net.peer.ip`.
+	// Rationale: For `net.peer.ip`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.peer.ip` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+	HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+	// The keys in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+	// The JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
+	// "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+	// number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+	// "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+	// "string", "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+	// The JSON-serialized value of the `ItemCollectionMetrics` response field.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+	// "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+	// "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+	// "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+	// The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+	//
+	// Type: double
+	// Required: No
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+	// The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// Required: No
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+	// The value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+	// The value of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
+	// ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+	// The value of the `Limit` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+	// The value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+	// The value of the `IndexName` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+	// The value of the `Select` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
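+
+// An illustrative (non-generated) example of recording a few of the DynamoDB
+// attributes above on a span, assuming a trace.Span named span is in scope:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
+//		AWSDynamoDBLimitKey.Int(10),
+//		AWSDynamoDBConsistentReadKey.Bool(true),
+//	)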
+
+// DynamoDB.CreateTable
+const (
+	// The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+	// field
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+	// number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+	// The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+	// number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+	// The value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+	// The number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+	// The value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+	// The value of the `Segment` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+	// The value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+	// The value of the `Count` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+	// The value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+	// The JSON-serialized value of each item in the `AttributeDefinitions` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+	// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+	// request field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+	// number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// This document defines the attributes used in messaging systems.
+const (
+	// A string identifying the messaging system.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+	// The message destination name. This might be equal to the span name but is
+	// required nevertheless.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	MessagingDestinationKey = attribute.Key("messaging.destination")
+	// The kind of message destination
+	//
+	// Type: Enum
+	// Required: Required only if the message destination is either a `queue` or
+	// `topic`.
+	// Stability: stable
+	MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
+	// A boolean that is true if the message destination is temporary.
+	//
+	// Type: boolean
+	// Required: If missing, it is assumed to be false.
+	// Stability: stable
+	MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
+	// The name of the transport protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'AMQP', 'MQTT'
+	MessagingProtocolKey = attribute.Key("messaging.protocol")
+	// The version of the transport protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.9.1'
+	MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
+	// Connection string.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'tibjmsnaming://localhost:7222',
+	// 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
+	MessagingURLKey = attribute.Key("messaging.url")
+	// A value used by the messaging system as an identifier for the message,
+	// represented as a string.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message_id")
+	// The [conversation ID](#conversations) identifying the conversation to which the
+	// message belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'MyConversationID'
+	MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
+	// The (uncompressed) size of the message payload in bytes. Also use this
+	// attribute if it is unknown whether the compressed or uncompressed payload size
+	// is reported.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2738
+	MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
+	// The compressed size of the message payload in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2048
+	MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
+)
+
+var (
+	// A message sent to a queue
+	MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+	// A message sent to a topic
+	MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
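+
+// An illustrative (non-generated) example of the messaging attributes above
+// for a producer span publishing to a Kafka topic, assuming a trace.Span
+// named span is in scope:
+//
+//	span.SetAttributes(
+//		MessagingSystemKey.String("kafka"),
+//		MessagingDestinationKey.String("MyTopic"),
+//		MessagingDestinationKindTopic,
+//	)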
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+	// A string identifying the kind of message consumption as defined in the
+	// [Operation names](#operation-names) section above. If the operation is "send",
+	// this attribute MUST NOT be set, since the operation can be inferred from the
+	// span kind in that case.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingOperationKey = attribute.Key("messaging.operation")
+	// The identifier for the consumer receiving a message. For Kafka, set it to
+	// `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
+	// present, or only `messaging.kafka.consumer_group`. For brokers, such as
+	// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+	// message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'mygroup - client-6'
+	MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
+)
+
+var (
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
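+
+// An illustrative (non-generated) example of the consumer attributes above,
+// following the Kafka rule for `messaging.consumer_id` (consumer group and
+// client ID joined with " - "):
+//
+//	span.SetAttributes(
+//		MessagingOperationReceive,
+//		MessagingConsumerIDKey.String("mygroup - client-6"),
+//	)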
+
+// Attributes for RabbitMQ
+const (
+	// RabbitMQ message routing key.
+	//
+	// Type: string
+	// Required: Unless it is empty.
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+	// Message keys in Kafka are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message_id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
+	// Name of the Kafka Consumer Group that is handling the message. Only applies to
+	// consumers, not producers.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
+	// Client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+	// Partition the message is sent to.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
+	// A boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// Required: If missing, it is assumed to be false.
+	// Stability: stable
+	MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
+)
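+
+// An illustrative (non-generated) sketch of the `messaging.kafka.message_key`
+// rule above: the attribute MUST NOT be set for a null key, so only set it
+// when a key is present (key here is a hypothetical []byte message key, with
+// a trace.Span named span in scope):
+//
+//	if key != nil {
+//		span.SetAttributes(MessagingKafkaMessageKeyKey.String(string(key)))
+//	}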
+
+// Attributes for Apache RocketMQ
+const (
+	// Namespace of RocketMQ resources; resources in different namespaces are
+	// isolated from each other.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+	// Name of the RocketMQ producer/consumer group that is handling the message. The
+	// client type is identified by the SpanKind.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+	// The unique identifier for each client.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myhost@8742@s8083jm'
+	MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+	// Type of message.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type")
+	// The secondary classifier of message besides topic.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag")
+	// Key(s) of message, another way to mark message besides message id.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys")
+	// Model of message consumption. This only applies to consumer spans.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// This document defines semantic conventions for remote procedure calls.
+const (
+	// A string identifying the remoting system. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	RPCSystemKey = attribute.Key("rpc.system")
+	// The full (logical) name of the service being called, including its package
+	// name, if applicable.
+	//
+	// Type: string
+	// Required: No, but recommended
+	// Stability: stable
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing class.
+	// The `code.namespace` attribute may be used to store the latter (despite the
+	// attribute name, it may include a class name; e.g., class with method actually
+	// executing the call on the server side, RPC client stub class on the client
+	// side).
+	RPCServiceKey = attribute.Key("rpc.service")
+	// The name of the (logical) method being called; it must be equal to the
+	// $method part in the span name.
+	//
+	// Type: string
+	// Required: No, but recommended
+	// Stability: stable
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the latter
+	// (e.g., method actually executing the call on the server side, RPC client stub
+	// method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
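+
+// An illustrative (non-generated) example combining the RPC attributes above
+// for a span named "myservice.EchoService/exampleMethod", assuming a
+// trace.Span named span is in scope:
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCServiceKey.String("myservice.EchoService"),
+//		RPCMethodKey.String("exampleMethod"),
+//	)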
+
+// Tech-specific attributes for gRPC.
+const (
+	// The [numeric status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
+	// request.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
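+
+// An illustrative (non-generated) sketch of recording the numeric gRPC status
+// code from an error, assuming the google.golang.org/grpc/status package and
+// a trace.Span named span in scope:
+//
+//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(status.Code(err))))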
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+	// Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
+	// 1.0 does not specify this, the value can be omitted.
+	//
+	// Type: string
+	// Required: If missing, it is assumed to be "1.0".
+	// Stability: stable
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+	// The `id` property of the request or response. Since the protocol allows
+	// id to be an int, a string, `null`, or missing (for notifications), the
+	// value is expected to be cast to string for simplicity. Use an empty
+	// string in case of a `null` value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// Required: If missing, response is assumed to be successful.
+	// Stability: stable
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
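+
+// An illustrative (non-generated) example of the JSON-RPC attributes above
+// for an error response, with the `id` cast to a string as required:
+//
+//	span.SetAttributes(
+//		RPCJsonrpcVersionKey.String("2.0"),
+//		RPCJsonrpcRequestIDKey.String("10"),
+//		RPCJsonrpcErrorCodeKey.Int(-32700),
+//		RPCJsonrpcErrorMessageKey.String("Parse error"),
+//	)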
+
+// RPC received/sent message.
+const (
+	// Whether this is a received or sent message.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+	// MUST be calculated as two different counters starting from `1`: one for
+	// sent messages and one for received messages.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+	// Compressed size of the message in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+	// Uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
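+
+// An illustrative (non-generated) sketch of the `message.id` counter rule
+// above: keep two 1-based counters, one for sent and one for received
+// messages, and attach them as span events (trace is
+// go.opentelemetry.io/otel/trace; sentCount is a hypothetical int counter):
+//
+//	sentCount++
+//	span.AddEvent("message",
+//		trace.WithAttributes(MessageTypeSent, MessageIDKey.Int(sentCount)))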
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
new file mode 100644
index 000000000..181fcc9c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.12.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
new file mode 100644
index 000000000..d68927094
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
new file mode 100644
index 000000000..4b4f3cbaf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/semconv/internal"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// HTTP scheme attributes.
+var (
+	HTTPSchemeHTTP  = HTTPSchemeKey.String("http")
+	HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
+
+var sc = &internal.SemanticConventions{
+	EnduserIDKey:                EnduserIDKey,
+	HTTPClientIPKey:             HTTPClientIPKey,
+	HTTPFlavorKey:               HTTPFlavorKey,
+	HTTPHostKey:                 HTTPHostKey,
+	HTTPMethodKey:               HTTPMethodKey,
+	HTTPRequestContentLengthKey: HTTPRequestContentLengthKey,
+	HTTPRouteKey:                HTTPRouteKey,
+	HTTPSchemeHTTP:              HTTPSchemeHTTP,
+	HTTPSchemeHTTPS:             HTTPSchemeHTTPS,
+	HTTPServerNameKey:           HTTPServerNameKey,
+	HTTPStatusCodeKey:           HTTPStatusCodeKey,
+	HTTPTargetKey:               HTTPTargetKey,
+	HTTPURLKey:                  HTTPURLKey,
+	HTTPUserAgentKey:            HTTPUserAgentKey,
+	NetHostIPKey:                NetHostIPKey,
+	NetHostNameKey:              NetHostNameKey,
+	NetHostPortKey:              NetHostPortKey,
+	NetPeerIPKey:                NetPeerIPKey,
+	NetPeerNameKey:              NetPeerNameKey,
+	NetPeerPortKey:              NetPeerPortKey,
+	NetTransportIP:              NetTransportIP,
+	NetTransportOther:           NetTransportOther,
+	NetTransportTCP:             NetTransportTCP,
+	NetTransportUDP:             NetTransportUDP,
+	NetTransportUnix:            NetTransportUnix,
+}
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span.  The network parameter is a string that the net.Dial function
+// from the standard library can understand.
+func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+	return sc.NetAttributesFromHTTPRequest(network, request)
+}
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	return sc.EndUserAttributesFromHTTPRequest(request)
+}
+
+// HTTPClientAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the client side.
+func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue {
+	return sc.HTTPClientAttributesFromHTTPRequest(request)
+}
+
+// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes
+// to be used with server-side HTTP metrics.
+func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue {
+	return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request)
+}
+
+// HTTPServerAttributesFromHTTPRequest generates attributes of the
+// http namespace as specified by the OpenTelemetry specification for
+// a span on the server side. Currently, only basic authentication is
+// supported.
+func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+	return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request)
+}
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+	return sc.HTTPAttributesFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+	return internal.SpanStatusFromHTTPStatusCode(code)
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+	return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind)
+}
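+
+// An illustrative (non-generated) sketch of using the helpers above inside an
+// HTTP server handler, assuming an *http.Request named req and a trace.Span
+// named span are in scope ("my-server" and the route are placeholder values):
+//
+//	attrs := HTTPServerAttributesFromHTTPRequest("my-server", "/users/:id", req)
+//	span.SetAttributes(attrs...)
+//	code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer)
+//	span.SetStatus(code, msg)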
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
new file mode 100644
index 000000000..b2155676f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go
@@ -0,0 +1,1042 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device).
+const (
+	// Array of brand name and version separated by a space
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (navigator.userAgentData.brands).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+	// The platform on which the browser is running
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (navigator.userAgentData.platform). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+	// be left unset in order for the values to be consistent.
+	// The list of possible values is defined in the [W3C User-Agent Client Hints
+	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+	// Note that some (but not all) of these values can overlap with values in the
+	// [os.type and os.name attributes](./os.md). However, for consistency, the values
+	// in the `browser.platform` attribute should capture the exact value that the
+	// user agent provides.
+	BrowserPlatformKey = attribute.Key("browser.platform")
+	// Full user-agent string provided by the browser
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36
+	// (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+	// Note: The user-agent value SHOULD be provided only from browsers that do not
+	// have a mechanism to retrieve brands and platform individually from the User-
+	// Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent`
+	// API can be used.
+	BrowserUserAgentKey = attribute.Key("browser.user_agent")
+)
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// Name of the cloud provider.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+	// The cloud account ID the resource is assigned to.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+	// The geographical region the resource is running in.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for example
+	// [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-
+	// detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-
+	// infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-
+	// us/global-infrastructure/geographies/), [Google Cloud
+	// regions](https://cloud.google.com/about/locations), or [Tencent Cloud
+	// regions](https://intl.cloud.tencent.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+	// Cloud regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the resource
+	// is running.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+	// The cloud platform in use.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
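+
+// An illustrative (non-generated) example of using the cloud attributes above
+// when constructing an SDK resource (resource is
+// go.opentelemetry.io/otel/sdk/resource; the schema URL argument is left
+// empty here for brevity):
+//
+//	res := resource.NewWithAttributes("",
+//		CloudProviderAWS,
+//		CloudPlatformAWSEC2,
+//		CloudRegionKey.String("us-east-1"),
+//	)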
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.
+	// amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-
+	// west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+	// The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo
+	// perguide/clusters.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+	// The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l
+	// aunch_types.html) for an ECS task.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+	// The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates
+	// t/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-
+	// west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+	// The task definition family this task definition is a member of.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+	// The revision for this task definition.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// The ARN of an EKS cluster.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// Resources specific to Amazon Web Services.
+const (
+	// The name(s) of the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like multi-container
+	// applications, where a single application has sidecar containers, and each
+	// writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+	// The Amazon Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+	// access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+	// The name(s) of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+	// The ARN(s) of the AWS log stream(s).
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-
+	// stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-
+	// access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain
+	// several log streams, so these ARNs necessarily identify both a log group and a
+	// log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// A container instance.
+const (
+	// Container name used by container runtime.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+	// Container ID. Usually a UUID, as for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-
+	// identification). The UUID might be abbreviated.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+	// The container runtime managing this container.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+	// Name of the image the container was built on.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+	// Container image tag.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.1'
+	ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// The software deployment.
+const (
+	// Name of the [deployment
+	// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'staging', 'production'
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// The device on which the process represented by this resource is running.
+const (
+	// A unique identifier representing the device
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values outlined
+	// below. This value is not an advertising identifier and MUST NOT be used as
+	// such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id
+	// entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden
+	// tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the
+	// Firebase Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on best
+	// practices and exact implementation details. Caution should be taken when
+	// storing personal data or anything which can identify a user. GDPR and data
+	// protection laws may apply, ensure you do your own due diligence.
+	DeviceIDKey = attribute.Key("device.id")
+	// The model identifier for the device
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine readable version of the
+	// model identifier rather than the market or consumer-friendly name of the
+	// device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+	// The marketing name for the device model
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human readable version of the
+	// device model rather than a machine readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+	// The name of the device manufacturer
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// A serverless instance.
+const (
+	// The name of the single function that this runtime instance executes.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-
+	// general.md#source-code-attributes)
+	// span attributes).
+
+	// For some cloud providers, the above definition is ambiguous. The following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud providers/products:
+
+	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	//   followed by a forward slash followed by the function name (this form
+	//   can also be seen in the resource JSON for the function).
+	//   This means that a span attribute MUST be used, as an Azure function
+	//   app can host multiple functions that would usually share
+	//   a TracerProvider (see also the `faas.id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+	// The unique ID of the single function that this runtime instance executes.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: On some cloud providers, it may not be possible to determine the full ID
+	// at startup,
+	// so consider setting `faas.id` as a span attribute instead.
+
+	// The exact value to use for `faas.id` depends on the cloud provider:
+
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-
+	// namespaces.html).
+	//   Take care not to use the "invoked ARN" directly but replace any
+	//   [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+	// aliases.html)
+	//   with the resolved function version, as the same runtime instance may be
+	// invokable with
+	//   multiple different aliases.
+	// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-
+	// resource-names)
+	// * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-
+	// us/rest/api/resources/resources/get-by-id) of the invoked function,
+	//   *not* the function app, having the form
+	//   `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.We
+	// b/sites/<FUNCAPP>/functions/<FUNC>`.
+	//   This means that a span attribute MUST be used, as an Azure function app can
+	// host multiple functions that would usually share
+	//   a TracerProvider.
+	FaaSIDKey = attribute.Key("faas.id")
+	// The immutable version of the function being executed.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-
+	// versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run:** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-
+	// var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+	// The execution environment ID as a string, which will potentially be reused
+	// for other invocations of the same function/function version.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+	// The amount of memory available to the serverless function in MiB.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 128
+	// Note: It's recommended to set this attribute since e.g. too little memory can
+	// easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+	// the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+	// information.
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
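+
+// An illustrative (non-generated) example of the Azure naming rule for
+// `faas.name` above, i.e. the function app name, a forward slash, then the
+// function name, recorded as a span attribute as the note requires:
+//
+//	span.SetAttributes(FaaSNameKey.String("myazurefunctionapp/some-function-name"))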
+
+// A host is defined as a general computing instance.
+const (
+	// Unique host ID. For Cloud, this must be the instance_id assigned by the cloud
+	// provider.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostIDKey = attribute.Key("host.id")
+	// Name of the host. On Unix systems, it may contain what the hostname command
+	// returns, or the fully qualified hostname, or another name specified by the
+	// user.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+	// Type of host. For Cloud, this must be the machine type.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+	// The CPU architecture the host system is running on.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+	// Name of the VM image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+	// VM image ID. For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+	// The version string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// A Kubernetes Cluster.
+const (
+	// The name of the cluster.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// A Kubernetes Node object.
+const (
+	// The name of the Node.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+	// The UID of the Node.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// A Kubernetes Namespace.
+const (
+	// The name of the namespace that the pod is running in.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// A Kubernetes Pod object.
+const (
+	// The UID of the Pod.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+	// The name of the Pod.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// The name of the Container from the Pod specification; it must be unique
+	// within a Pod. The container runtime usually uses a different, globally
+	// unique name (`container.name`).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+	// Number of times the container was restarted. This attribute can be used to
+	// identify a particular container (running or stopped) within a container spec.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// A Kubernetes ReplicaSet object.
+const (
+	// The UID of the ReplicaSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+	// The name of the ReplicaSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// A Kubernetes Deployment object.
+const (
+	// The UID of the Deployment.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+	// The name of the Deployment.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// A Kubernetes StatefulSet object.
+const (
+	// The UID of the StatefulSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+	// The name of the StatefulSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// A Kubernetes DaemonSet object.
+const (
+	// The UID of the DaemonSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+	// The name of the DaemonSet.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// A Kubernetes Job object.
+const (
+	// The UID of the Job.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+	// The name of the Job.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// A Kubernetes CronJob object.
+const (
+	// The UID of the CronJob.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+	// The name of the CronJob.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// The operating system (OS) on which the process represented by this resource is running.
+const (
+	// The operating system type.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	OSTypeKey = attribute.Key("os.type")
+	// Human readable (not intended to be parsed) OS version information, as
+	// reported by e.g. the `ver` or `lsb_release -a` commands.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+	// Human readable operating system name.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+	// The version string of the operating system as defined in [Version
+	// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// An operating system process.
+const (
+	// Process identifier (PID).
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+	// The name of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+	// The full path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+	// The command used to launch the process (i.e. the command name). On Linux based
+	// systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows,
+	// can be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+	// The full command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`. Do not
+	// set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// Required: See below
+	// Stability: stable
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+	// All the command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited strings
+	// extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be
+	// the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// Required: See below
+	// Stability: stable
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+	// The username of the user that owns the process.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+)
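+
+// An illustrative sketch of populating some of the process attributes above
+// from the Go standard library (os and filepath are assumed imports; error
+// handling is elided for brevity):
+//
+//	pid := ProcessPIDKey.Int(os.Getpid())
+//	exe, _ := os.Executable()
+//	attrs := []attribute.KeyValue{
+//		pid,
+//		ProcessExecutablePathKey.String(exe),
+//		ProcessExecutableNameKey.String(filepath.Base(exe)),
+//		ProcessCommandArgsKey.StringSlice(os.Args),
+//	}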
+
+// The single (language) runtime instance which is monitored.
+const (
+	// The name of the runtime of this process. For compiled native binaries, this
+	// SHOULD be the name of the compiler.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+	// The version of the runtime of this process, as returned by the runtime without
+	// modification.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+	// An additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// A service instance.
+const (
+	// Logical name of the service.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled services. If
+	// the value was not specified, SDKs MUST fall back to `unknown_service:`
+	// concatenated with [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available, the
+	// value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+	// A namespace for `service.name`.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group of
+	// services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name` is
+	// expected to be unique for all services that have no explicit namespace defined
+	// (so the empty/unspecified namespace is simply one more valid namespace). A
+	// zero-length namespace string is assumed equal to an unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+	// The string ID of the service instance.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be globally
+	// unique). The ID helps to distinguish instances of the same service that exist
+	// at the same time (e.g. instances of a horizontally scaled service). It is
+	// preferable for the ID to be persistent and stay the same for the lifetime of
+	// the service instance, however it is acceptable that the ID is ephemeral and
+	// changes during important lifetime events for the service (e.g. service
+	// restarts). If the service has no inherent unique ID that can be used as the
+	// value of this attribute it is recommended to generate a random Version 1 or
+	// Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+	// The version string of the service API or implementation.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '2.0.0'
+	ServiceVersionKey = attribute.Key("service.version")
+)
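+
+// An illustrative sketch of the `service.name` fallback rule noted above;
+// executableName is a hypothetical helper returning the value of
+// `process.executable.name`, or "" if unavailable.
+//
+//	func serviceName(configured string) attribute.KeyValue {
+//		if configured != "" {
+//			return ServiceNameKey.String(configured)
+//		}
+//		if exe := executableName(); exe != "" {
+//			return ServiceNameKey.String("unknown_service:" + exe)
+//		}
+//		return ServiceNameKey.String("unknown_service")
+//	}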
+
+// The telemetry SDK used to capture data recorded by the instrumentation libraries.
+const (
+	// The name of the telemetry SDK as defined above.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+	// The language of the telemetry SDK.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+	// The version string of the telemetry SDK.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+	// The version string of the auto instrumentation agent, if used.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime.
+const (
+	// The name of the web engine.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+	// The version of the web engine.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+	// Additional description of the web engine (e.g. detailed version and edition
+	// information).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
new file mode 100644
index 000000000..2f2a019e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.12.0"
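+
+// An illustrative sketch (values are placeholders): SchemaURL is normally
+// passed alongside the resource attributes defined by this package, e.g. via
+// the SDK's resource helper (resource refers to
+// go.opentelemetry.io/otel/sdk/resource):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ServiceNameKey.String("gotosocial"),
+//		ServiceVersionKey.String("1.0.0"),
+//	)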
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
new file mode 100644
index 000000000..047d8e95c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go
@@ -0,0 +1,1704 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+	// The full invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, if applicable).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `faas.id` if an alias is involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
+const (
+	// The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// uniquely identifies the event.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+	// The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// identifies the context in which an event happened.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123',
+	// 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+	// The [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+	// The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// contains a value describing the type of event related to the originating
+	// occurrence.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+	// The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by source).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// This document defines semantic conventions for the OpenTracing Shim
+const (
+	// Parent-child Reference type
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// This document defines the attributes used to perform database client calls.
+const (
+	// An identifier for the database management system (DBMS) product being used. See
+	// below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+	// The connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+	// Username for accessing the database.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+	// The fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+	// used to connect.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+	// This attribute is used to report the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target database
+	// (even if the command fails).
+	//
+	// Type: string
+	// Required: Required, if applicable.
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called "schema
+	// name". In case there are multiple layers that could be considered for database
+	// name (e.g. Oracle instance name and schema name), the database name to be used
+	// is the more specific layer (e.g. Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+	// The database statement being executed.
+	//
+	// Type: string
+	// Required: Required if applicable and not explicitly disabled via
+	// instrumentation configuration.
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	// Note: The value may be sanitized to exclude sensitive information.
+	DBStatementKey = attribute.Key("db.statement")
+	// The name of the operation being executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// Required: Required, if `db.statement` is not applicable.
+	// Stability: stable
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to attempt any
+	// client-side parsing of `db.statement` just to get this property, but it should
+	// be set if the operation name is provided by the library being instrumented. If
+	// the SQL statement has an ambiguous operation, or performs more than one
+	// operation, this value may be omitted.
+	DBOperationKey = attribute.Key("db.operation")
+)
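+
+// An illustrative sketch (values are placeholders) of typical attributes on a
+// database client span, following the requirement notes above:
+//
+//	attrs := []attribute.KeyValue{
+//		DBSystemPostgreSQL,
+//		DBNameKey.String("gotosocial"),
+//		DBStatementKey.String("SELECT * FROM accounts WHERE id = $1"),
+//		DBOperationKey.String("SELECT"),
+//	}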
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+)
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// The Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// being connected to. This name is used to determine the port of a named instance.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// Call-level attributes for Cassandra
+const (
+	// The fetch size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+	// The consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+	// The name of the primary table that the operation is acting upon, including the
+	// keyspace name (if applicable).
+	//
+	// Type: string
+	// Required: Recommended if available.
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the `db.sql.table` attribute but references Cassandra
+	// rather than SQL. It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting upon an
+	// anonymous table, or more than one table, this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+	// Whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+	// The number of times a query was speculatively executed. Not set or `0` if the
+	// query was not executed speculatively.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+	// The ID of the coordinating node for a query.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+	// The data center of the coordinating node for a query.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Call-level attributes for Redis
+const (
+	// The index of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To be used
+	// instead of the generic `db.name` attribute.
+	//
+	// Type: int
+	// Required: Required, if other than the default database (`0`).
+	// Stability: stable
+	// Examples: 0, 1, 15
+	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// Call-level attributes for MongoDB
+const (
+	// The collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// Call-level attributes for SQL databases
+const (
+	// The name of the primary table that the operation is acting upon, including the
+	// database name (if applicable).
+	//
+	// Type: string
+	// Required: Recommended if available.
+	// Stability: stable
+	// Examples: 'public.users', 'customers'
+	// Note: It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting upon an
+	// anonymous table, or more than one table, this value MUST NOT be set.
+	DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// This document defines the attributes used to report a single exception associated with a span.
+const (
+	// The type of the exception (its fully-qualified class name, if applicable). The
+	// dynamic type of the exception should be preferred over the static type in
+	// languages that support it.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+	// The exception message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+	// A stacktrace as a string in the natural representation for the language
+	// runtime. The representation is to be determined and documented by each language
+	// SIG.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test exception\\n
+	//  at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
+	//  at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
+	//  at com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+	// SHOULD be set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of a span,
+	// if that span is ended while the exception is still logically "in flight".
+	// This may be actually "in flight" in some languages (e.g. if the exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most languages.
+
+	// It is usually not possible to determine at the point where an exception is
+	// thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending the span,
+	// as done in the [example above](#recording-an-exception).
+
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
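+
+// An illustrative sketch of recording an exception that is known to escape
+// the span's scope; span, err, trace (go.opentelemetry.io/otel/trace), and
+// codes (go.opentelemetry.io/otel/codes) are assumed to be in scope.
+//
+//	span.RecordError(err, trace.WithAttributes(ExceptionEscapedKey.Bool(true)))
+//	span.SetStatus(codes.Error, err.Error())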
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+	// Type of the trigger which caused this function execution.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: For the server/consumer span on the incoming side,
+	// `faas.trigger` MUST be set.
+
+	// Clients invoking FaaS instances usually cannot set `faas.trigger`,
+	// since they would typically need to look in the payload to determine
+	// the event type. If clients set it, it should be the same as the
+	// trigger that the corresponding incoming span would have (i.e., this has
+	// nothing to do with the underlying transport used to make the API
+	// call to invoke the lambda, which is often HTTP).
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+	// The execution ID of the current function execution.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+	// The name of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 this corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+	// Describes the type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+	// A string containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+	// in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+	// The document name/table subjected to the operation. For example, in Cloud
+	// Storage or S3 this is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+	// A string containing the function invocation time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed
+	// in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+	// A string containing the schedule period as a [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+	// A boolean that is true if the serverless function is executed for the first
+	// time (aka cold-start).
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+	// The name of the invoked function.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+	// function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+	// The cloud provider of the invoked function.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked
+	// function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+	// The cloud region of the invoked function.
+	//
+	// Type: string
+	// Required: For some cloud providers, like AWS or GCP, the region in which a
+	// function is hosted is essential to uniquely identify the function and also part
+	// of its endpoint. Since it's part of the endpoint being called, the region is
+	// always known to clients. In these cases, `faas.invoked_region` MUST be set
+	// accordingly. If the region is unknown to the client or not required for
+	// identifying the invoked function, setting `faas.invoked_region` is optional.
+	// Stability: stable
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+	// function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// These attributes may be used for any network related operation.
+const (
+	// Transport protocol used. See note below.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	NetTransportKey = attribute.Key("net.transport")
+	// Remote address of the peer (dotted decimal for IPv4 or
+	// [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6)
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '127.0.0.1'
+	NetPeerIPKey = attribute.Key("net.peer.ip")
+	// Remote port number.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	NetPeerPortKey = attribute.Key("net.peer.port")
+	// Remote hostname or similar, see note below.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'example.com'
+	// Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra
+	// DNS lookup.
+	NetPeerNameKey = attribute.Key("net.peer.name")
+	// Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '192.168.0.1'
+	NetHostIPKey = attribute.Key("net.host.ip")
+	// Like `net.peer.port` but for the host port.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 35555
+	NetHostPortKey = attribute.Key("net.host.port")
+	// Local hostname or similar, see note below.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'localhost'
+	NetHostNameKey = attribute.Key("net.host.name")
+	// The internet connection type currently being used by the host.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Examples: 'wifi'
+	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+	// This describes more details regarding the connection type. It may be the type
+	// of cell technology connection, but it could also be used to describe details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Examples: 'LTE'
+	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+	// The name of the mobile carrier.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'sprint'
+	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+	// The mobile carrier country code.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '310'
+	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+	// The mobile carrier network code.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '001'
+	NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+	// The ISO 3166-1 alpha-2 2-character country code associated with the mobile
+	// carrier network.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'DE'
+	NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Another IP-based protocol
+	NetTransportIP = NetTransportKey.String("ip")
+	// Unix Domain socket. See below
+	NetTransportUnix = NetTransportKey.String("unix")
+	// Named or anonymous pipe. See note below
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	NetTransportOther = NetTransportKey.String("other")
+)
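+
+// An illustrative sketch (values are placeholders) of common network
+// attributes on a client span:
+//
+//	attrs := []attribute.KeyValue{
+//		NetTransportTCP,
+//		NetPeerNameKey.String("example.com"),
+//		NetPeerPortKey.Int(443),
+//	}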
+
+var (
+	// wifi
+	NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+	// wired
+	NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+	// cell
+	NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+	// unavailable
+	NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+	// unknown
+	NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+	// GPRS
+	NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// Operations that access some remote service.
+const (
+	// The [`service.name`](../../resource/semantic_conventions/README.md#service) of
+	// the remote service. SHOULD be equal to the actual `service.name` resource
+	// attribute of the remote service if any.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// These attributes may be used for any operation with an authenticated and/or authorized enduser.
+const (
+	// Username or client_id extracted from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the
+	// inbound request from outside the system.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+	// Actual/assumed role the client is making the request under extracted from token
+	// or application security context.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+	// Scopes or granted authorities the client currently possesses extracted from
+	// token or application security context. The value would come from the scope
+	// associated with an [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value
+	// in a [SAML 2.0
+	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// These attributes may be used for any operation to store information about a thread that started a span.
+const (
+	// Current "managed" thread ID (as opposed to OS thread ID).
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+	// Current thread name.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// These attributes allow to report this unit of code and therefore to provide more context about the span.
+const (
+	// The method or function name, or equivalent (usually rightmost part of the code
+	// unit's name).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+	// The "namespace" within which `code.function` is defined. Usually the qualified
+	// class or module name, such that `code.namespace` + some separator +
+	// `code.function` form a unique identifier for the code unit.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+	// The source code file name that identifies the code unit as uniquely as possible
+	// (preferably an absolute file path).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+	// The line number in `code.filepath` best representing the operation. It SHOULD
+	// point within the code unit named in `code.function`.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+)
+
+// This document defines semantic conventions for HTTP client and server Spans.
+const (
+	// HTTP request method.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+	// Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`.
+	// Usually the fragment is not transmitted over HTTP, but if it is known, it
+	// should be included nevertheless.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	// Note: `http.url` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case the attribute's
+	// value should be `https://www.example.com/`.
+	HTTPURLKey = attribute.Key("http.url")
+	// The full request target as passed in an HTTP request line or equivalent.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '/path/12314/?q=ddds#123'
+	HTTPTargetKey = attribute.Key("http.target")
+	// The value of the [HTTP host
+	// header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header
+	// should also be reported, see note.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'www.example.org'
+	// Note: When the header is present but empty the attribute SHOULD be set to the
+	// empty string. Note that this is a valid situation that is expected in certain
+	// cases, according to the aforementioned [section of RFC
+	// 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
+	// set the attribute MUST NOT be set.
+	HTTPHostKey = attribute.Key("http.host")
+	// The URI scheme identifying the used protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+	// [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// Required: If and only if one was received/sent.
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+	// Kind of HTTP protocol used.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	// Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
+	// except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+	// Value of the [HTTP
+	// User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by
+	// the client.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+	// The size of the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as the
+	// [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+	// requests using transport encoding, this should be the compressed size.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+	// The size of the uncompressed request payload body after transport decoding. Not
+	// set if transport encoding is not used.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 5493
+	HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed")
+	// The size of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as the
+	// [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+	// requests using transport encoding, this should be the compressed size.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+	// The size of the uncompressed response payload body after transport decoding.
+	// Not set if transport encoding is not used.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 5493
+	HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed")
+	// The ordinal number of the request re-sending attempt.
+	//
+	// Type: int
+	// Required: If and only if a request was retried.
+	// Stability: stable
+	// Examples: 3
+	HTTPRetryCountKey = attribute.Key("http.retry_count")
+)
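+
+// An illustrative sketch of the `http.url` credential rule above, using
+// net/url to strip any userinfo before the attribute is set (rawURL and
+// attrs are assumed to be in scope):
+//
+//	if u, err := url.Parse(rawURL); err == nil {
+//		u.User = nil // MUST NOT record credentials embedded in the URL
+//		attrs = append(attrs, HTTPURLKey.String(u.String()))
+//	}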
+
+var (
+	// HTTP/1.0
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP/1.1
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP/2
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// HTTP/3
+	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+	// SPDY protocol
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// Semantic Convention for HTTP Server
+const (
+	// The primary server name of the matched virtual host. This should be obtained
+	// via configuration. If no such configuration can be obtained, this attribute
+	// MUST NOT be set (`net.host.name` should be used instead).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'example.com'
+	// Note: `http.url` is usually not readily available on the server side but would
+	// have to be assembled in a cumbersome and sometimes lossy process from other
+	// information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
+	// preferred to supply the raw data that is available.
+	HTTPServerNameKey = attribute.Key("http.server_name")
+	// The matched route (path template).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '/users/:userID?'
+	HTTPRouteKey = attribute.Key("http.route")
+	// The IP address of the original client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.peer.ip`, which would
+	// identify the network-level peer, which may be a proxy.
+
+	// This attribute should be set when a source of information different
+	// from the one used for `net.peer.ip` is available, even if that other
+	// source just confirms the same value as `net.peer.ip`.
+	// Rationale: For `net.peer.ip`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.peer.ip` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+	HTTPClientIPKey = attribute.Key("http.client_ip")
+)
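+
+// An illustrative sketch of deriving `http.client_ip` from X-Forwarded-For,
+// whose first entry is the original client; r is an *http.Request, attrs is
+// assumed, and whether the header can be trusted is deployment-specific.
+//
+//	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
+//		clientIP := strings.TrimSpace(strings.Split(xff, ",")[0])
+//		attrs = append(attrs, HTTPClientIPKey.String(clientIP))
+//	}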
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+	// The keys in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+	// The JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
+	// "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+	// number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+	// "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+	// "string", "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+	// The JSON-serialized value of the `ItemCollectionMetrics` response field.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+	// "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+	// "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+	// "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+	// The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+	//
+	// Type: double
+	// Required: No
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+	// The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// Required: No
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+	// The value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+	// The value of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems,
+	// ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+	// The value of the `Limit` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+	// The value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+	// The value of the `IndexName` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+	// The value of the `Select` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// DynamoDB.CreateTable
+const (
+	// The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+	// field
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+	// number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+	// The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+	// number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+	// The value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+	// The number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+	// The value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// Required: No
+	// Stability: stable
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+	// The value of the `Segment` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+	// The value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+	// The value of the `Count` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+	// The value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+	// The JSON-serialized value of each item in the `AttributeDefinitions` request
+	// field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+	// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+	// request field.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+	// number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
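+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// how an AWS SDK instrumentation might attach the UpdateTable attributes
+// above to a span, assuming a trace.Span value named span:
+//
+//	span.SetAttributes(
+//		AWSDynamoDBAttributeDefinitionsKey.StringSlice([]string{
+//			`{ "AttributeName": "string", "AttributeType": "string" }`,
+//		}),
+//	)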
+
+// This document defines the attributes used in messaging systems.
+const (
+	// A string identifying the messaging system.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+	// The message destination name. This might be equal to the span name but is
+	// required nevertheless.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	MessagingDestinationKey = attribute.Key("messaging.destination")
+	// The kind of message destination.
+	//
+	// Type: Enum
+	// Required: Required only if the message destination is either a `queue` or
+	// `topic`.
+	// Stability: stable
+	MessagingDestinationKindKey = attribute.Key("messaging.destination_kind")
+	// A boolean that is true if the message destination is temporary.
+	//
+	// Type: boolean
+	// Required: If missing, it is assumed to be false.
+	// Stability: stable
+	MessagingTempDestinationKey = attribute.Key("messaging.temp_destination")
+	// The name of the transport protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'AMQP', 'MQTT'
+	MessagingProtocolKey = attribute.Key("messaging.protocol")
+	// The version of the transport protocol.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '0.9.1'
+	MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version")
+	// Connection string.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'tibjmsnaming://localhost:7222',
+	// 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue'
+	MessagingURLKey = attribute.Key("messaging.url")
+	// A value used by the messaging system as an identifier for the message,
+	// represented as a string.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message_id")
+	// The [conversation ID](#conversations) identifying the conversation to which the
+	// message belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'MyConversationID'
+	MessagingConversationIDKey = attribute.Key("messaging.conversation_id")
+	// The (uncompressed) size of the message payload in bytes. Also use this
+	// attribute if it is unknown whether the compressed or uncompressed payload size
+	// is reported.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2738
+	MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes")
+	// The compressed size of the message payload in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2048
+	MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes")
+)
+
+var (
+	// A message sent to a queue
+	MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+	// A message sent to a topic
+	MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+	// A string identifying the kind of message consumption as defined in the
+	// [Operation names](#operation-names) section above. If the operation is "send",
+	// this attribute MUST NOT be set, since the operation can be inferred from the
+	// span kind in that case.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingOperationKey = attribute.Key("messaging.operation")
+	// The identifier for the consumer receiving a message. For Kafka, set it to
+	// `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are
+	// present, or only `messaging.kafka.consumer_group`. For brokers, such as
+	// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+	// message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'mygroup - client-6'
+	MessagingConsumerIDKey = attribute.Key("messaging.consumer_id")
+)
+
+var (
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
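+
+// Example: an illustrative sketch (not part of the upstream conventions) of a
+// Kafka consumer annotating its processing span with the messaging attributes
+// above, assuming a trace.Span value named span:
+//
+//	span.SetAttributes(
+//		MessagingSystemKey.String("kafka"),
+//		MessagingDestinationKey.String("MyTopic"),
+//		MessagingDestinationKindTopic,
+//		MessagingOperationProcess,
+//	)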
+
+// Attributes for RabbitMQ
+const (
+	// RabbitMQ message routing key.
+	//
+	// Type: string
+	// Required: Unless it is empty.
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+	// Message keys in Kafka are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message_id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key")
+	// Name of the Kafka Consumer Group that is handling the message. Only applies to
+	// consumers, not producers.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group")
+	// Client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+	// Partition the message is sent to.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition")
+	// A boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// Required: If missing, it is assumed to be false.
+	// Stability: stable
+	MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone")
+)
+
+// Attributes for Apache RocketMQ
+const (
+	// Namespace of RocketMQ resources; resources in different namespaces are
+	// isolated from each other.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+	// Name of the RocketMQ producer/consumer group that is handling the message. The
+	// client type is identified by the SpanKind.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+	// The unique identifier for each client.
+	//
+	// Type: string
+	// Required: Always
+	// Stability: stable
+	// Examples: 'myhost@8742@s8083jm'
+	MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+	// Type of message.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type")
+	// The secondary classifier of message besides topic.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag")
+	// Key(s) of the message, another way to mark a message besides the message ID.
+	//
+	// Type: string[]
+	// Required: No
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys")
+	// Model of message consumption. This only applies to consumer spans.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// This document defines semantic conventions for remote procedure calls.
+const (
+	// A string identifying the remoting system. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	RPCSystemKey = attribute.Key("rpc.system")
+	// The full (logical) name of the service being called, including its package
+	// name, if applicable.
+	//
+	// Type: string
+	// Required: No, but recommended
+	// Stability: stable
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing class.
+	// The `code.namespace` attribute may be used to store the latter (despite the
+	// attribute name, it may include a class name; e.g., class with method actually
+	// executing the call on the server side, RPC client stub class on the client
+	// side).
+	RPCServiceKey = attribute.Key("rpc.service")
+	// The name of the (logical) method being called, which must be equal to the
+	// $method part in the span name.
+	//
+	// Type: string
+	// Required: No, but recommended
+	// Stability: stable
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the latter
+	// (e.g., method actually executing the call on the server side, RPC client stub
+	// method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
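+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// annotating a client span for an outgoing gRPC call, assuming a trace.Span
+// value named span:
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCServiceKey.String("myservice.EchoService"),
+//		RPCMethodKey.String("exampleMethod"),
+//	)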
+
+// Tech-specific attributes for gRPC.
+const (
+	// The [numeric status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC
+	// request.
+	//
+	// Type: Enum
+	// Required: Always
+	// Stability: stable
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
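+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// recording the gRPC status code once a call completes; st is assumed to be a
+// *status.Status obtained from google.golang.org/grpc/status:
+//
+//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(st.Code())))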
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+	// Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC
+	// 1.0 does not specify this, the value can be omitted.
+	//
+	// Type: string
+	// Required: If missing, it is assumed to be "1.0".
+	// Stability: stable
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+	// `id` property of the request or response. Since the protocol allows the id
+	// to be an int, a string, `null`, or missing (for notifications), the value is
+	// expected to be cast to a string for simplicity. Use an empty string in case
+	// of a `null` value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// Required: If missing, response is assumed to be successful.
+	// Stability: stable
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// Required: No
+	// Stability: stable
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
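+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// annotating a client span for a failed JSON-RPC 2.0 call, assuming a
+// trace.Span value named span:
+//
+//	span.SetAttributes(
+//		RPCJsonrpcVersionKey.String("2.0"),
+//		RPCJsonrpcRequestIDKey.String("10"),
+//		RPCJsonrpcErrorCodeKey.Int(-32700),
+//		RPCJsonrpcErrorMessageKey.String("Parse error"),
+//	)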
+
+// RPC received/sent message.
+const (
+	// Whether this is a received or sent message.
+	//
+	// Type: Enum
+	// Required: No
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+	// MUST be calculated as two different counters starting from `1`, one for sent
+	// messages and one for received messages.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+	// Compressed size of the message in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+	// Uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// Required: No
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
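+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// attaching per-message attributes to a span event, assuming trace refers to
+// the go.opentelemetry.io/otel/trace package:
+//
+//	span.AddEvent("message", trace.WithAttributes(
+//		MessageTypeSent,
+//		MessageIDKey.Int(1),
+//	))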
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 000000000..71a1f7748
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 000000000..679c40c4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It represents the sHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means for referring to a value without including the value itself. This
+	// can provide additional context for understanding the meaning behind a
+	// value. For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
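+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// recording a feature-flag evaluation as a span event with the helpers above,
+// assuming trace refers to the go.opentelemetry.io/otel/trace package:
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"),
+//	))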
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. Its value MUST be calculated as two different
+	// counters starting from `1`, one for sent messages and one for received
+	// messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. Its value MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It represents the sHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span if that span is ended while the exception is still logically "in
+	// flight". This may be actually "in flight" in some languages (e.g. if the
+	// exception is passed to a Context manager's `__exit__` method in Python)
+	// but will usually be caught at the point of recording the exception in
+	// most languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown whether it will escape the scope of a span. However, it is
+	// trivial to know that an exception will escape if one checks for an
+	// active exception just before ending the span, as done in the
+	// [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
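+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// recording an error just before ending a span, marking the exception as
+// escaping its scope; trace refers to the go.opentelemetry.io/otel/trace
+// package:
+//
+//	span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))
+//	span.End()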
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 000000000..9b8c559de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 000000000..d5c4b5c13
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+	HTTPSchemeHTTP  = HTTPSchemeKey.String("http")
+	HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
new file mode 100644
index 000000000..c60b2a6bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httpconv provides OpenTelemetry semantic conventions for the net/http
+// package from the standard library.
+package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/semconv/internal/v2"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+var (
+	nc = &internal.NetConv{
+		NetHostNameKey:     semconv.NetHostNameKey,
+		NetHostPortKey:     semconv.NetHostPortKey,
+		NetPeerNameKey:     semconv.NetPeerNameKey,
+		NetPeerPortKey:     semconv.NetPeerPortKey,
+		NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+		NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+		NetTransportOther:  semconv.NetTransportOther,
+		NetTransportTCP:    semconv.NetTransportTCP,
+		NetTransportUDP:    semconv.NetTransportUDP,
+		NetTransportInProc: semconv.NetTransportInProc,
+	}
+
+	hc = &internal.HTTPConv{
+		NetConv: nc,
+
+		EnduserIDKey:                 semconv.EnduserIDKey,
+		HTTPClientIPKey:              semconv.HTTPClientIPKey,
+		HTTPFlavorKey:                semconv.HTTPFlavorKey,
+		HTTPMethodKey:                semconv.HTTPMethodKey,
+		HTTPRequestContentLengthKey:  semconv.HTTPRequestContentLengthKey,
+		HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+		HTTPRouteKey:                 semconv.HTTPRouteKey,
+		HTTPSchemeHTTP:               semconv.HTTPSchemeHTTP,
+		HTTPSchemeHTTPS:              semconv.HTTPSchemeHTTPS,
+		HTTPStatusCodeKey:            semconv.HTTPStatusCodeKey,
+		HTTPTargetKey:                semconv.HTTPTargetKey,
+		HTTPURLKey:                   semconv.HTTPURLKey,
+		HTTPUserAgentKey:             semconv.HTTPUserAgentKey,
+	}
+)
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. It will return the following attributes if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the request
+// contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func ClientResponse(resp *http.Response) []attribute.KeyValue {
+	return hc.ClientResponse(resp)
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func ClientRequest(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequest(req)
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func ClientStatus(code int) (codes.Code, string) {
+	return hc.ClientStatus(code)
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
+func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequest(server, req)
+}
+
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func ServerStatus(code int) (codes.Code, string) {
+	return hc.ServerStatus(code)
+}
+
+// RequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func RequestHeader(h http.Header) []attribute.KeyValue {
+	return hc.RequestHeader(h)
+}
+
+// ResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func ResponseHeader(h http.Header) []attribute.KeyValue {
+	return hc.ResponseHeader(h)
+}
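+
+// Example: an illustrative sketch (not part of the upstream package) of using
+// ServerRequest and ServerStatus in a net/http handler; tracer is assumed to
+// come from an OpenTelemetry TracerProvider, and statusCode is the code
+// written to the response:
+//
+//	ctx, span := tracer.Start(r.Context(), r.Method,
+//		trace.WithAttributes(ServerRequest("example.com", r)...))
+//	defer span.End()
+//	// ... handle the request using ctx ...
+//	span.SetStatus(ServerStatus(statusCode))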
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 000000000..39a2eab3a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,2010 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// separated by a space
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute
+	// SHOULD be left unset in order for the values to be consistent.
+	// The list of possible values is defined in the [W3C User-Agent Client
+	// Hints
+	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+	// Note that some (but not all) of these values can overlap with values in
+	// the [`os.type` and `os.name` attributes](./os.md). However, for
+	// consistency, the values in the `browser.platform` attribute should
+	// capture the exact value that the user agent provides.
+	BrowserPlatformKey = attribute.Key("browser.platform")
+
+	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+	// semantic conventions. It represents a boolean that is true if the
+	// browser is running on a mobile device
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
+	// SHOULD be left unset.
+	BrowserMobileKey = attribute.Key("browser.mobile")
+
+	// BrowserUserAgentKey is the attribute Key conforming to the
+	// "browser.user_agent" semantic conventions. It represents the full
+	// user-agent string provided by the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
+	// AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54
+	// Safari/537.36'
+	// Note: The user-agent value SHOULD be provided only from browsers that do
+	// not have a mechanism to retrieve brands and platform individually from
+	// the User-Agent Client Hints API. To retrieve the value, the legacy
+	// `navigator.userAgent` API can be used.
+	BrowserUserAgentKey = attribute.Key("browser.user_agent")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions. It represents the preferred
+	// language of the user using the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
+	// Note: This value is intended to be taken from the Navigator API
+	// `navigator.language`.
+	BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+	return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserUserAgent returns an attribute KeyValue conforming to the
+// "browser.user_agent" semantic conventions. It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+	return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://intl.cloud.tencent.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the cloud
+	// regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the
+	// resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
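+
+// Example: an illustrative sketch (not part of the upstream conventions) of
+// attaching the cloud attributes above to a Resource, assuming resource
+// refers to the go.opentelemetry.io/otel/sdk/resource package and ctx is a
+// context.Context:
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			CloudProviderAWS,
+//			CloudRegion("us-east-1"),
+//			CloudAvailabilityZone("us-east-1c"),
+//		),
+//	)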
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+	// [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the task
+	// definition family this task definition is a member of.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for this task definition.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+	return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+	return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+	return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// A container instance.
+const (
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageTagKey is the attribute Key conforming to the
+	// "container.image.tag" semantic conventions. It represents the container
+	// image tag.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+	return ContainerImageTagKey.String(val)
+}
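+
+// A minimal usage sketch: container attributes usually describe the resource
+// a process runs in. All values below are hypothetical; resource.NewSchemaless
+// is the SDK constructor for a resource without a schema URL.
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	res := resource.NewSchemaless(
+//		ContainerName("gotosocial"),
+//		ContainerRuntime("containerd"),
+//		ContainerImageName("superseriousbusiness/gotosocial"),
+//		ContainerImageTag("0.9.0"),
+//	)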
+
+// The software deployment.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'staging', 'production'
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+	// conventions. It represents a unique identifier representing the device
+	// conventions. It represents a unique identifier representing the device.
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values
+	// outlined below. This value is not an advertising identifier and MUST NOT
+	// be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+	// to the [vendor
+	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
+	// Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on
+	// best practices and exact implementation details. Caution should be taken
+	// when storing personal data or anything which can identify a user. GDPR
+	// and data protection laws may apply; ensure you do your own due
+	// diligence.
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine readable version
+	// of the model identifier rather than the market or consumer-friendly name
+	// of the device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the
+	// "device.model.name" semantic conventions. It represents the marketing
+	// name for the device model.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human readable version of
+	// the device model rather than a machine readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device.
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	//   followed by a forward slash followed by the function name (this form
+	//   can also be seen in the resource JSON for the function).
+	//   This means that a span attribute MUST be used, as an Azure function
+	//   app can host multiple functions that would usually share
+	//   a TracerProvider (see also the `faas.id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+	// conventions. It represents the unique ID of the single function that
+	// this runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so consider setting `faas.id` as a span attribute instead.
+	//
+	// The exact value to use for `faas.id` depends on the cloud provider:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	//   Take care not to use the "invoked ARN" directly but replace any
+	//   [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	//   with the resolved function version, as the same runtime instance may
+	// be invokable with
+	//   multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	//   *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	//   This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	//   a TracerProvider.
+	FaaSIDKey = attribute.Key("faas.id")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run:** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, which will potentially be reused for other invocations to the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function in MiB.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 128
+	// Note: It's recommended to set this attribute since, for example, too
+	// little memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
+// conventions. It represents the unique ID of the single function that this
+// runtime instance executes.
+func FaaSID(val string) attribute.KeyValue {
+	return FaaSIDKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, which will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
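+
+// A minimal usage sketch for an AWS Lambda-style deployment, reusing the
+// example values from the docs above (all hypothetical):
+//
+//	attrs := []attribute.KeyValue{
+//		FaaSName("my-function"),
+//		FaaSID("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
+//		FaaSVersion("26"),
+//		FaaSMaxMemory(128),
+//	}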
+
+// A host is defined as a general computing instance.
+const (
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// Linux systems, the `machine-id` located in `/etc/machine-id` or
+	// `/var/lib/dbus/machine-id` may be used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+	// value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
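+
+// A minimal usage sketch: host.arch is an Enum attribute, so one of the
+// predefined HostArch* values above is used rather than a free-form string.
+// The other values are hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostImageID("ami-07b06b442921831e5"),
+//	}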
+
+// A Kubernetes Cluster.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from Pod specification, which must be unique within a Pod. The
+	// container runtime usually uses a different, globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, which must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
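+
+// A minimal usage sketch combining the Kubernetes attributes defined so far;
+// all names, and the restart count, are hypothetical:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SClusterName("opentelemetry-cluster"),
+//		K8SNamespaceName("default"),
+//		K8SPodName("gotosocial-0"),
+//		K8SContainerName("gotosocial"),
+//		K8SContainerRestartCount(0),
+//	}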
+
+// A Kubernetes ReplicaSet object.
+const (
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the
+	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+	// semantic conventions. It represents the UID of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, as reported by, for example, the
+	// `ver` or `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
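+
+// A minimal usage sketch: process attributes can typically be filled in from
+// the standard library, e.g. os.Getpid and os.Executable (error handling
+// elided for brevity):
+//
+//	import "os"
+//
+//	exe, _ := os.Executable()
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessExecutablePath(exe),
+//	}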
+
+// The single (language) runtime instance which is monitored.
+const (
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fall back to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available,
+	// the value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). A zero-length namespace string is assumed equal to an
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service). It is preferable for the ID to be persistent and stay
+	// the same for the lifetime of the service instance, however it is
+	// acceptable that the ID is ephemeral and changes during important
+	// lifetime events for the service (e.g. service restarts). If the service
+	// has no inherent unique ID that can be used as the value of this
+	// attribute it is recommended to generate a random Version 1 or Version 4
+	// RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
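+
+// A minimal usage sketch tying the service attributes to an SDK resource; the
+// SchemaURL constant from this package pins the attributes to this semantic
+// convention version. The service name, version, and environment are
+// hypothetical.
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ServiceName("gotosocial"),
+//		ServiceVersion("2.0.0"),
+//		DeploymentEnvironment("production"),
+//	)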
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+	// TelemetryAutoVersionKey is the attribute Key conforming to the
+	// "telemetry.auto.version" semantic conventions. It represents the version
+	// string of the auto instrumentation agent, if used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+	return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+	return TelemetryAutoVersionKey.String(val)
+}
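+
+// Note that applications rarely set these attributes by hand: the Go SDK's
+// resource.Default() already records telemetry.sdk.name,
+// telemetry.sdk.language, and telemetry.sdk.version for the running SDK.
+// A sketch:
+//
+//	import "go.opentelemetry.io/otel/sdk/resource"
+//
+//	res := resource.Default() // includes the telemetry.sdk.* attributes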
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+	// semantic conventions. It represents the name of the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of
+	// the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the
+	// additional description of the web engine (e.g. detailed version and
+	// edition information).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OtelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OtelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+	return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+	return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OtelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It is deprecated; use the
+	// `otel.scope.name` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OtelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It is deprecated; use the
+	// `otel.scope.version` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OtelLibraryName(val string) attribute.KeyValue {
+	return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OtelLibraryVersion(val string) attribute.KeyValue {
+	return OtelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 000000000..42fc525d1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
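+
+// As a usage sketch (not part of the generated file; ctx and error handling
+// are assumed): SchemaURL is normally passed alongside resource attributes
+// when building an SDK resource, so consumers know which semantic convention
+// version the attributes follow:
+//
+//	res, err := resource.New(ctx,
+//		resource.WithSchemaURL(semconv.SchemaURL),
+//		resource.WithAttributes(semconv.ServiceName("gotosocial")),
+//	)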
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 000000000..8c4a7299d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3375 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
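+
+// As an illustrative sketch (the span and err values are assumed, not part
+// of this package): these helpers are typically attached to an "exception"
+// event on the active span:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		semconv.ExceptionType("*net.OpError"), // dynamic type preferred
+//		semconv.ExceptionMessage(err.Error()),
+//	))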
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have same `event.name`, yet be
+	// unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` response, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `faas.id` if an alias is involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
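+
+// A hedged usage sketch (the span variable and event values are assumed):
+// when publishing or processing a CloudEvent, the attributes above can be
+// set on the active span:
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventSpecVersion("1.0"),
+//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
+//	)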
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable.)
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable and not
+	// explicitly disabled via instrumentation configuration.)
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	// Note: The value may be sanitized to exclude sensitive information.
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If `db.statement` is not
+	// applicable.)
+	// Stability: stable
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
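+
+// As a usage sketch (tracer, ctx and query are assumed): database client
+// instrumentation typically combines a `db.system` enum member with the
+// call-level attributes above when starting a client span:
+//
+//	ctx, span := tracer.Start(ctx, "SELECT accounts",
+//		trace.WithSpanKind(trace.SpanKindClient),
+//		trace.WithAttributes(
+//			semconv.DBSystemPostgreSQL,
+//			semconv.DBName("gotosocial"),
+//			semconv.DBOperation("SELECT"),
+//			semconv.DBStatement(query), // may be sanitized first
+//		),
+//	)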
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// being connected to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+	// longer required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary table that the operation is acting upon, including the keyspace
+	// name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the db.sql.table attribute but references cassandra
+	// rather than sql. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents the
+	// whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions. It represents the index
+	// of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To
+	// be used instead of the generic `db.name` attribute.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// database (`0`).)
+	// Stability: stable
+	// Examples: 0, 1, 15
+	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+	return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+	// DBMongoDBCollectionKey is the attribute Key conforming to the
+	// "db.mongodb.collection" semantic conventions. It represents the
+	// collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+	return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+	// DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+	// semantic conventions. It represents the name of the primary table that
+	// the operation is acting upon, including the database name (if
+	// applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'public.users', 'customers'
+	// Note: It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting
+	// upon an anonymous table, or more than one table, this value MUST NOT be
+	// set.
+	DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+	return DBSQLTableKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+	// OtelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OtelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OtelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// OtelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OtelStatusDescription(val string) attribute.KeyValue {
+	return OtelStatusDescriptionKey.String(val)
+}
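+
+// A minimal sketch of how a non-OTLP exporter might render a span's error
+// status with these attributes (values assumed for illustration):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.OtelStatusCodeError,
+//		semconv.OtelStatusDescription("resource not found"),
+//	}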
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function execution.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: For the server/consumer span on the incoming side,
+	// `faas.trigger` MUST be set.
+	//
+	// Clients invoking FaaS instances usually cannot set `faas.trigger`,
+	// since they would typically need to look in the payload to determine
+	// the event type. If clients set it, it should be the same as the
+	// trigger that the corresponding incoming span would have (i.e., this has
+	// nothing to do with the underlying transport used to make the API
+	// call to invoke the lambda, which is often HTTP).
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
+	// semantic conventions. It represents the execution ID of the current
+	// function execution.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSExecution returns an attribute KeyValue conforming to the
+// "faas.execution" semantic conventions. It represents the execution ID of the
+// current function execution.
+func FaaSExecution(val string) attribute.KeyValue {
+	return FaaSExecutionKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the
+	// describes the type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+	return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+	return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+	return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+	return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+	return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
+	// AWS or GCP, the region in which a function is hosted is essential to
+	// uniquely identify the function and also part of its endpoint. Since it's
+	// part of the endpoint being called, the region is always known to
+	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+	// If the region is unknown to the client or not required for identifying
+	// the invoked function, setting `faas.invoked_region` is optional.)
+	// Stability: stable
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
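+
+// A hedged sketch for outgoing FaaS invocation spans (the span variable is
+// assumed):
+//
+//	span.SetAttributes(
+//		semconv.FaaSInvokedName("my-function"),
+//		semconv.FaaSInvokedProviderAWS,
+//		semconv.FaaSInvokedRegion("eu-central-1"),
+//	)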
+
+// These attributes may be used for any network related operation.
+const (
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It represents the transport protocol used. See
+	// note below.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	NetTransportKey = attribute.Key("net.transport")
+
+	// NetAppProtocolNameKey is the attribute Key conforming to the
+	// "net.app.protocol.name" semantic conventions. It represents the
+	// application layer protocol used. The value SHOULD be normalized to
+	// lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
+
+	// NetAppProtocolVersionKey is the attribute Key conforming to the
+	// "net.app.protocol.version" semantic conventions. It represents the
+	// version of the application layer protocol used. See note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `net.app.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client used has a version of `0.27.2`, but sends HTTP version
+	// `1.1`, this attribute should be set to `1.1`.
+	NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
+
+	// NetSockPeerNameKey is the attribute Key conforming to the
+	// "net.sock.peer.name" semantic conventions. It represents the remote
+	// socket peer name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If available and different from
+	// `net.peer.name` and if `net.sock.peer.addr` is set.)
+	// Stability: stable
+	// Examples: 'proxy.example.com'
+	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+	// NetSockPeerAddrKey is the attribute Key conforming to the
+	// "net.sock.peer.addr" semantic conventions. It represents the remote
+	// socket peer address: IPv4 or IPv6 for internet protocols, path for local
+	// communication,
+	// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '127.0.0.1', '/tmp/mysql.sock'
+	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+	// NetSockPeerPortKey is the attribute Key conforming to the
+	// "net.sock.peer.port" semantic conventions. It represents the remote
+	// socket peer port.
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If defined for the address family and if
+	// different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+	// Stability: stable
+	// Examples: 16456
+	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+	// NetSockFamilyKey is the attribute Key conforming to the
+	// "net.sock.family" semantic conventions. It represents the protocol
+	// [address
+	// family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+	// which is used for communication.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (If different than `inet` and if
+	// any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+	// of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+	// `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+	// instrumentations that follow previous versions of this document.)
+	// Stability: stable
+	// Examples: 'inet6', 'bluetooth'
+	NetSockFamilyKey = attribute.Key("net.sock.family")
+
+	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+	// semantic conventions. It represents the logical remote hostname, see
+	// note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'example.com'
+	// Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+	// extra DNS lookup.
+	NetPeerNameKey = attribute.Key("net.peer.name")
+
+	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+	// semantic conventions. It represents the logical remote port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 80, 8080, 443
+	NetPeerPortKey = attribute.Key("net.peer.port")
+
+	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
+	// semantic conventions. It represents the logical local hostname or
+	// similar, see note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'localhost'
+	NetHostNameKey = attribute.Key("net.host.name")
+
+	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
+	// semantic conventions. It represents the logical local port number,
+	// preferably the one that the peer used to connect.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 8080
+	NetHostPortKey = attribute.Key("net.host.port")
+
+	// NetSockHostAddrKey is the attribute Key conforming to the
+	// "net.sock.host.addr" semantic conventions. It represents the local
+	// socket address. Useful in case of a multi-IP host.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '192.168.0.1'
+	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+	// NetSockHostPortKey is the attribute Key conforming to the
+	// "net.sock.host.port" semantic conventions. It represents the local
+	// socket port number.
+	//
+	// Type: int
+	// RequirementLevel: Recommended (If defined for the address family and if
+	// different than `net.host.port` and if `net.sock.host.addr` is set.)
+	// Stability: stable
+	// Examples: 35555
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetHostConnectionTypeKey is the attribute Key conforming to the
+	// "net.host.connection.type" semantic conventions. It represents the
+	// internet connection type currently being used by the host.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+	// NetHostConnectionSubtypeKey is the attribute Key conforming to the
+	// "net.host.connection.subtype" semantic conventions. It represents the
+	// this describes more details regarding the connection.type. It may be the
+	// type of cell technology connection, but it could be used for describing
+	// details about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+	// NetHostCarrierNameKey is the attribute Key conforming to the
+	// "net.host.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'sprint'
+	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+	// NetHostCarrierMccKey is the attribute Key conforming to the
+	// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '310'
+	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+	// NetHostCarrierMncKey is the attribute Key conforming to the
+	// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '001'
+	NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+	// NetHostCarrierIccKey is the attribute Key conforming to the
+	// "net.host.carrier.icc" semantic conventions. It represents the ISO
+	// 3166-1 alpha-2 2-character country code associated with the mobile
+	// carrier network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'DE'
+	NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+	// ip_tcp
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe. See note below
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+	// IPv4 address
+	NetSockFamilyInet = NetSockFamilyKey.String("inet")
+	// IPv6 address
+	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+	// Unix domain socket path
+	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+	// wifi
+	NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+	// wired
+	NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+	// cell
+	NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+	// unavailable
+	NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+	// unknown
+	NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+	// GPRS
+	NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetAppProtocolName returns an attribute KeyValue conforming to the
+// "net.app.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetAppProtocolName(val string) attribute.KeyValue {
+	return NetAppProtocolNameKey.String(val)
+}
+
+// NetAppProtocolVersion returns an attribute KeyValue conforming to the
+// "net.app.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetAppProtocolVersion(val string) attribute.KeyValue {
+	return NetAppProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+	return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+	return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+	return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+	return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number.
+func NetPeerPort(val int) attribute.KeyValue {
+	return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+	return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect.
+func NetHostPort(val int) attribute.KeyValue {
+	return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+	return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+	return NetSockHostPortKey.Int(val)
+}
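+
+// Illustrative sketch, assuming `span` implements trace.Span: recording both
+// the logical peer and the concrete socket address for an outbound call.
+//
+//	span.SetAttributes(
+//		NetPeerName("example.com"),
+//		NetPeerPort(443),
+//		NetSockPeerAddr("192.0.2.10"),
+//		NetSockPeerPort(443),
+//	)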
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+	return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+	return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+	return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+	return NetHostCarrierIccKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](../../resource/semantic_conventions/README.md#service)
+	// of the remote service. SHOULD be equal to the actual `service.name`
+	// resource attribute of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
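+
+// Illustrative sketch, assuming `span` implements trace.Span: the value
+// SHOULD match the remote service's own `service.name` resource attribute
+// where that is known.
+//
+//	span.SetAttributes(PeerService("AuthTokenCache"))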
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
+	// semantic conventions. It represents the username or client_id extracted
+	// from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+	// in the inbound request from outside the system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+
+	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+	// semantic conventions. It represents the actual/assumed role the client
+	// is making the request under extracted from token or application security
+	// context.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+
+	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+	// semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses extracted from token or application
+	// security context. The value would come from the scope associated with an
+	// [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+	// value in a [SAML 2.0
+	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
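+
+// Illustrative sketch, assuming `span` implements trace.Span and the values
+// were extracted from a validated access token:
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)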
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// These attributes allow to report this unit of code and therefore to provide
+// more context about the span.
+const (
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
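+
+// Illustrative sketch, assuming `span` implements trace.Span: the code unit
+// can be resolved at runtime with the standard library, e.g.
+//
+//	if pc, file, line, ok := runtime.Caller(0); ok {
+//		span.SetAttributes(
+//			CodeFunction(runtime.FuncForPC(pc).Name()),
+//			CodeFilepath(file),
+//			CodeLineNumber(line),
+//		)
+//	}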
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It represents the HTTP request method.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the [HTTP
+	// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions. It represents the kind of HTTP protocol used.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: If `net.transport` is not specified, it can be assumed to be
+	// `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+	// assumed.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPUserAgentKey is the attribute Key conforming to the
+	// "http.user_agent" semantic conventions. It represents the value of the
+	// [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions. It represents the
+	// size of the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions. It represents the
+	// size of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+	// HTTP/1.0
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP/1.1
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP/2
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// HTTP/3
+	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+	// SPDY protocol
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue {
+	return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+	return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+	return HTTPResponseContentLengthKey.Int(val)
+}
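+
+// Illustrative sketch, assuming `span` implements trace.Span and `resp` is a
+// *http.Response obtained from the instrumented call:
+//
+//	span.SetAttributes(
+//		HTTPMethod("GET"),
+//		HTTPStatusCode(resp.StatusCode),
+//		HTTPFlavorHTTP11,
+//	)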
+
+// Semantic Convention for HTTP Client
+const (
+	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+	// conventions. It represents the full HTTP request URL in the form
+	// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+	// not transmitted over HTTP, but if it is known, it should be included
+	// nevertheless.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	// Note: `http.url` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case the
+	// attribute's value should be `https://www.example.com/`.
+	HTTPURLKey = attribute.Key("http.url")
+
+	// HTTPResendCountKey is the attribute Key conforming to the
+	// "http.resend_count" semantic conventions. It represents the ordinal
+	// number of request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Recommended (if and only if request was retried.)
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of what was the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other).
+	HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+	return HTTPURLKey.String(val)
+}
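+
+// Illustrative sketch, assuming `span` implements trace.Span and `rawURL`
+// holds the request URL: per the note above, userinfo must be stripped
+// before the URL is recorded, e.g. with net/url.
+//
+//	if u, err := url.Parse(rawURL); err == nil {
+//		u.User = nil // http.url MUST NOT contain credentials
+//		span.SetAttributes(HTTPURL(u.String()))
+//	}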
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+	return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions. It represents the full request target as passed in
+	// a HTTP request line or equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '/path/12314/?q=ddds'
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: 'http.route' MUST NOT be populated when this is not supported by
+	// the HTTP server framework, as the route attribute should have low
+	// cardinality and the URI path cannot substitute for it.
+	HTTPRouteKey = attribute.Key("http.route")
+
+	// HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+	// semantic conventions. It represents the IP address of the original
+	// client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.sock.peer.addr`, which
+	// would identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.sock.peer.addr` is available, even if that
+	// other source just confirms the same value as `net.sock.peer.addr`.
+	// Rationale: For `net.sock.peer.addr`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+	HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in a
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+	return HTTPClientIPKey.String(val)
+}
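+
+// Illustrative sketch, assuming `span` implements trace.Span, `r` is the
+// inbound *http.Request, and the server framework exposes a matched route
+// template:
+//
+//	span.SetAttributes(
+//		HTTPScheme("https"),
+//		HTTPTarget(r.RequestURI),
+//		HTTPRoute("/users/:userID?"),
+//	)
+//	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
+//		first := strings.TrimSpace(strings.Split(xff, ",")[0])
+//		span.SetAttributes(HTTPClientIP(first))
+//	}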
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+	// AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_names" semantic conventions. It represents the keys
+	// in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+	// AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+	// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+	// JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+	// "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number }, "TableName": "string",
+	// "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+	// AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+	// the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+	// represents the JSON-serialized value of the `ItemCollectionMetrics`
+	// response field.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+	// blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+	// "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+	// "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+	// "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+	// AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+	// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+	// represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+	// request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+	// AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+	// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+	// It represents the value of the
+	// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+	// "aws.dynamodb.consistent_read" semantic conventions. It represents the
+	// value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+	// AWSDynamoDBProjectionKey is the attribute Key conforming to the
+	// "aws.dynamodb.projection" semantic conventions. It represents the value
+	// of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+	// RelatedItems, ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+	// AWSDynamoDBLimitKey is the attribute Key conforming to the
+	// "aws.dynamodb.limit" semantic conventions. It represents the value of
+	// the `Limit` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+	// value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+	// AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+	// "aws.dynamodb.index_name" semantic conventions. It represents the value
+	// of the `IndexName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+	// AWSDynamoDBSelectKey is the attribute Key conforming to the
+	// "aws.dynamodb.select" semantic conventions. It represents the value of
+	// the `Select` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+	return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+	return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+	return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+	return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+	return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+	return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+	return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+	return AWSDynamoDBSelectKey.String(val)
+}
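+
+// Illustrative sketch, assuming `span` implements trace.Span: attributes for
+// a query against a secondary index.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBIndexName("name_to_group"),
+//		AWSDynamoDBConsistentRead(false),
+//		AWSDynamoDBLimit(10),
+//	)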
+
+// DynamoDB.CreateTable
+const (
+	// AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+	return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
+	// value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+	return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
+	// "aws.dynamodb.segment" semantic conventions. It represents the value of
+	// the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+	// "aws.dynamodb.total_segments" semantic conventions. It represents the
+	// value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+	// AWSDynamoDBCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.count" semantic conventions. It represents the value of
+	// the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
+	// value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+	return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+	return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+	return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in the
+	// `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
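+
+// Illustrative sketch, assuming `span` implements trace.Span and the
+// document has already been sanitized of sensitive values:
+//
+//	span.SetAttributes(
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlOperationTypeQuery,
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)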
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the [conversation ID](#conversations) identifying the conversation to
+	// which the message belongs, represented as a string. Sometimes called
+	// "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+	// the "messaging.message.payload_size_bytes" semantic conventions. It
+	// represents the (uncompressed) size of the message payload in bytes. Also
+	// use this attribute if it is unknown whether the compressed or
+	// uncompressed payload size is reported.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2738
+	MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+	// MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+	// conforming to the "messaging.message.payload_compressed_size_bytes"
+	// semantic conventions. It represents the compressed size of the message
+	// payload in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2048
+	MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If the broker does not have such a
+	// notion, the destination name SHOULD uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationKindKey is the attribute Key conforming to the
+	// "messaging.destination.kind" semantic conventions. It represents the
+	// kind of message destination
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+	// A message sent to a queue
+	MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+	// A message sent to a topic
+	MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
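+
+// Illustrative sketch, assuming `span` implements trace.Span: a publish to a
+// templated, non-temporary queue.
+//
+//	span.SetAttributes(
+//		MessagingDestinationKindQueue,
+//		MessagingDestinationName("/customers/42"),
+//		MessagingDestinationTemplate("/customers/{customerID}"),
+//		MessagingDestinationTemporary(false),
+//	)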
+
+// Semantic convention for attributes that describe messaging source on broker
+const (
+	// MessagingSourceNameKey is the attribute Key conforming to the
+	// "messaging.source.name" semantic conventions. It represents the message
+	// source name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Source name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If the broker does not have such a
+	// notion, the source name SHOULD uniquely identify the broker.
+	MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+	// MessagingSourceKindKey is the attribute Key conforming to the
+	// "messaging.source.kind" semantic conventions. It represents the kind of
+	// message source
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+
+	// MessagingSourceTemplateKey is the attribute Key conforming to the
+	// "messaging.source.template" semantic conventions. It represents the low
+	// cardinality representation of the messaging source name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/customers/{customerID}'
+	// Note: Source names could be constructed from templates. An example would
+	// be a source name involving a user name or product id. Although the
+	// source name in this case is of high cardinality, the underlying template
+	// is of low cardinality and can be effectively used for grouping and
+	// aggregation.
+	MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+	// MessagingSourceTemporaryKey is the attribute Key conforming to the
+	// "messaging.source.temporary" semantic conventions. It represents a
+	// boolean that is true if the message source is temporary and might not
+	// exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+	// MessagingSourceAnonymousKey is the attribute Key conforming to the
+	// "messaging.source.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message source is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+	// A message received from a queue
+	MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+	// A message received from a topic
+	MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+	return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+	return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+	return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// an auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+	return MessagingSourceAnonymousKey.Bool(val)
+}
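
Taken together, the source helpers above annotate a consumer-side span. A
minimal sketch, assuming the semconv package vendored by this patch is
v1.17.0 (which defines `messaging.source.*`); the tracer and queue names are
illustrative:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel"
		semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
		"go.opentelemetry.io/otel/trace"
	)

	func consume(ctx context.Context) {
		// Mark the span as consumer-side and describe the message source.
		_, span := otel.Tracer("example/consumer").Start(ctx, "MyQueue receive",
			trace.WithSpanKind(trace.SpanKindConsumer),
			trace.WithAttributes(
				semconv.MessagingSourceName("MyQueue"),
				semconv.MessagingSourceKindQueue,
				semconv.MessagingSourceTemporary(false),
			))
		defer span.End()
	}

	func main() { consume(context.Background()) }
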
+
+// General attributes used in messaging systems.
+const (
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents a string
+	// identifying the messaging system.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+
+	// MessagingOperationKey is the attribute Key conforming to the
+	// "messaging.operation" semantic conventions. It represents a string
+	// identifying the kind of messaging operation as defined in the [Operation
+	// names](#operation-names) section above.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationKey = attribute.Key("messaging.operation")
+
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the span describes an
+	// operation on a batch of messages.)
+	// Stability: stable
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+	// publish
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+	return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
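
For reference, the required `messaging.system` and `messaging.operation`
attributes would typically be combined on a producer span like so (a sketch
with `ctx` in scope and the same package aliases as the previous example;
the batch count is only set because more than one message is involved):

	_, span := otel.Tracer("example/producer").Start(ctx, "MyTopic publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystem("kafka"),
			semconv.MessagingOperationPublish,
			semconv.MessagingBatchMessageCount(10),
		))
	defer span.End()
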
+
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+	// MessagingConsumerIDKey is the attribute Key conforming to the
+	// "messaging.consumer.id" semantic conventions. It represents the
+	// identifier for the consumer receiving a message. For Kafka, set it to
+	// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+	// both are present, or only `messaging.kafka.consumer.group`. For brokers,
+	// such as RabbitMQ and Artemis, set it to the `client_id` of the client
+	// consuming the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mygroup - client-6'
+	MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+	return MessagingConsumerIDKey.String(val)
+}
+
+// Attributes for RabbitMQ
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message key in Kafka: keys are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaClientIDKey is the attribute Key conforming to the
+	// "messaging.kafka.client_id" semantic conventions. It represents the
+	// client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+	// "messaging.kafka.source.partition" semantic conventions. It represents
+	// the partition the message is received from.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: stable
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka: keys are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+	return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+	return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
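
The Kafka helpers compose the same way. A hedged sketch of the attribute set
for a consumer-side span, with values mirroring the examples in the comments
above (this would sit inside a `trace.WithAttributes(...)` option as shown
earlier):

	trace.WithAttributes(
		semconv.MessagingSystem("kafka"),
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaClientID("client-5"),
		semconv.MessagingKafkaSourcePartition(2),
		semconv.MessagingKafkaMessageOffset(42),
		semconv.MessagingKafkaMessageKey("myKey"),
	)
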
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of one another.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqClientIDKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_id" semantic conventions. It represents the
+	// unique identifier for each client.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myhost@8742@s8083jm'
+	MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: stable
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of a message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of a message, another way to mark a message besides the
+	// message ID.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of one another.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+	return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message,
+// which determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of a message, another way to mark a message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
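
Analogously for RocketMQ, a FIFO-message consumer span might carry the
following (illustrative values taken from the examples above; again a sketch
meant for a `trace.WithAttributes(...)` option):

	trace.WithAttributes(
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
		semconv.MessagingRocketmqConsumptionModelClustering,
	)
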
+
+// Semantic conventions for remote procedure calls.
+const (
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	RPCSystemKey = attribute.Key("rpc.system")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called; it must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called; it must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
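
On a gRPC client or server span these combine as, for example (a sketch; the
service and method names are illustrative, not part of this patch):

	trace.WithAttributes(
		semconv.RPCSystemGRPC,
		semconv.RPCService("myservice.EchoService"),
		semconv.RPCMethod("Echo"),
		semconv.RPCGRPCStatusCodeOk,
	)
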
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version, as set in the `jsonrpc` property of the request/response.
+	// Since JSON-RPC 1.0 does not specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// version (`1.0`))
+	// Stability: stable
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of the request or response. Since the protocol allows the id
+	// to be an int, a string, `null`, or missing (for notifications), the
+	// value is expected to be cast to a string for simplicity. Use an empty
+	// string for a `null` value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If response is not successful.)
+	// Stability: stable
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version, as set in the `jsonrpc` property of the request/response. Since
+// JSON-RPC 1.0 does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string for a
+// `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
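
For a failed JSON-RPC 2.0 call, the error attributes are set only on error
responses (values mirror the examples above; a sketch for use inside
`trace.WithAttributes(...)`):

	trace.WithAttributes(
		semconv.RPCJsonrpcVersion("2.0"),
		semconv.RPCJsonrpcRequestID("10"),
		// Only on error responses:
		semconv.RPCJsonrpcErrorCode(-32700),
		semconv.RPCJsonrpcErrorMessage("Parse error"),
	)
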
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
new file mode 100644
index 000000000..caf7249de
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Tracer creates a named tracer that implements the Tracer interface.
+// If the name is an empty string, the provider uses a default name.
+//
+// This is short for GetTracerProvider().Tracer(name, opts...)
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	return GetTracerProvider().Tracer(name, opts...)
+}
+
+// GetTracerProvider returns the registered global trace provider.
+// If none is registered then an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+//
+//	tracer := otel.GetTracerProvider().Tracer("example.com/foo")
+//
+// or
+//
+//	tracer := otel.Tracer("example.com/foo")
+func GetTracerProvider() trace.TracerProvider {
+	return global.TracerProvider()
+}
+
+// SetTracerProvider registers `tp` as the global trace provider.
+func SetTracerProvider(tp trace.TracerProvider) {
+	global.SetTracerProvider(tp)
+}
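
These three functions are the whole public surface for global tracing. A
typical wiring sketch, assuming the SDK's trace package (vendored elsewhere
in this patch) is imported as sdktrace:

	package main

	import (
		"go.opentelemetry.io/otel"
		sdktrace "go.opentelemetry.io/otel/sdk/trace"
	)

	func main() {
		// Register an SDK-backed provider globally; otel.Tracer then
		// returns tracers backed by it instead of the no-op default.
		otel.SetTracerProvider(sdktrace.NewTracerProvider())
		tracer := otel.Tracer("example.com/foo")
		_ = tracer
	}
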
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
new file mode 100644
index 000000000..cb3efbb9a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -0,0 +1,333 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// TracerConfig is a group of options for a Tracer.
+type TracerConfig struct {
+	instrumentationVersion string
+	// Schema URL of the telemetry emitted by the Tracer.
+	schemaURL string
+	attrs     attribute.Set
+}
+
+// InstrumentationVersion returns the version of the library providing instrumentation.
+func (t *TracerConfig) InstrumentationVersion() string {
+	return t.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
+	return t.attrs
+}
+
+// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
+func (t *TracerConfig) SchemaURL() string {
+	return t.schemaURL
+}
+
+// NewTracerConfig applies all the options to a returned TracerConfig.
+func NewTracerConfig(options ...TracerOption) TracerConfig {
+	var config TracerConfig
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	return config
+}
+
+// TracerOption applies an option to a TracerConfig.
+type TracerOption interface {
+	apply(TracerConfig) TracerConfig
+}
+
+type tracerOptionFunc func(TracerConfig) TracerConfig
+
+func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig {
+	return fn(cfg)
+}
+
+// SpanConfig is a group of options for a Span.
+type SpanConfig struct {
+	attributes []attribute.KeyValue
+	timestamp  time.Time
+	links      []Link
+	newRoot    bool
+	spanKind   SpanKind
+	stackTrace bool
+}
+
+// Attributes describe the associated qualities of a Span.
+func (cfg *SpanConfig) Attributes() []attribute.KeyValue {
+	return cfg.attributes
+}
+
+// Timestamp is a time in a Span life-cycle.
+func (cfg *SpanConfig) Timestamp() time.Time {
+	return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *SpanConfig) StackTrace() bool {
+	return cfg.stackTrace
+}
+
+// Links are the associations a Span has with other Spans.
+func (cfg *SpanConfig) Links() []Link {
+	return cfg.links
+}
+
+// NewRoot identifies a Span as the root Span for a new trace. This is
+// commonly used when an existing trace crosses trust boundaries and the
+// remote parent span context should be ignored for security.
+func (cfg *SpanConfig) NewRoot() bool {
+	return cfg.newRoot
+}
+
+// SpanKind is the role a Span has in a trace.
+func (cfg *SpanConfig) SpanKind() SpanKind {
+	return cfg.spanKind
+}
+
+// NewSpanStartConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data); it is left to the SDK to perform this
+// action.
+func NewSpanStartConfig(options ...SpanStartOption) SpanConfig {
+	var c SpanConfig
+	for _, option := range options {
+		c = option.applySpanStart(c)
+	}
+	return c
+}
+
+// NewSpanEndConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data); it is left to the SDK to perform this
+// action.
+func NewSpanEndConfig(options ...SpanEndOption) SpanConfig {
+	var c SpanConfig
+	for _, option := range options {
+		c = option.applySpanEnd(c)
+	}
+	return c
+}
+
+// SpanStartOption applies an option to a SpanConfig. These options are applicable
+// only when the span is created.
+type SpanStartOption interface {
+	applySpanStart(SpanConfig) SpanConfig
+}
+
+type spanOptionFunc func(SpanConfig) SpanConfig
+
+func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig {
+	return fn(cfg)
+}
+
+// SpanEndOption applies an option to a SpanConfig. These options are
+// applicable only when the span is ended.
+type SpanEndOption interface {
+	applySpanEnd(SpanConfig) SpanConfig
+}
+
+// EventConfig is a group of options for an Event.
+type EventConfig struct {
+	attributes []attribute.KeyValue
+	timestamp  time.Time
+	stackTrace bool
+}
+
+// Attributes describe the associated qualities of an Event.
+func (cfg *EventConfig) Attributes() []attribute.KeyValue {
+	return cfg.attributes
+}
+
+// Timestamp is a time in an Event life-cycle.
+func (cfg *EventConfig) Timestamp() time.Time {
+	return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *EventConfig) StackTrace() bool {
+	return cfg.stackTrace
+}
+
+// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
+// timestamp option is passed, the returned EventConfig will have a Timestamp
+// set to the call time, otherwise no validation is performed on the returned
+// EventConfig.
+func NewEventConfig(options ...EventOption) EventConfig {
+	var c EventConfig
+	for _, option := range options {
+		c = option.applyEvent(c)
+	}
+	if c.timestamp.IsZero() {
+		c.timestamp = time.Now()
+	}
+	return c
+}
+
+// EventOption applies span event options to an EventConfig.
+type EventOption interface {
+	applyEvent(EventConfig) EventConfig
+}
+
+// SpanOption are options that can be used at both the beginning and end of a span.
+type SpanOption interface {
+	SpanStartOption
+	SpanEndOption
+}
+
+// SpanStartEventOption are options that can be used at the start of a span, or with an event.
+type SpanStartEventOption interface {
+	SpanStartOption
+	EventOption
+}
+
+// SpanEndEventOption are options that can be used at the end of a span, or with an event.
+type SpanEndEventOption interface {
+	SpanEndOption
+	EventOption
+}
+
+type attributeOption []attribute.KeyValue
+
+func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
+	c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+	return c
+}
+func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o attributeOption) applyEvent(c EventConfig) EventConfig {
+	c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+	return c
+}
+
+var _ SpanStartEventOption = attributeOption{}
+
+// WithAttributes adds the attributes related to a span life-cycle event.
+// These attributes are used to describe the work a Span represents when this
+// option is provided to a Span's start or end events. Otherwise, these
+// attributes provide additional information about the event being recorded
+// (e.g. error, state change, processing progress, system event).
+//
+// If multiple of these options are passed, the attributes of each successive
+// option will extend the attributes instead of overwriting. There is no
+// guarantee of uniqueness in the resulting attributes.
+func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
+	return attributeOption(attributes)
+}
+
+// SpanEventOption are options that can be used with an event or a span.
+type SpanEventOption interface {
+	SpanOption
+	EventOption
+}
+
+type timestampOption time.Time
+
+func (o timestampOption) applySpan(c SpanConfig) SpanConfig {
+	c.timestamp = time.Time(o)
+	return c
+}
+func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig   { return o.applySpan(c) }
+func (o timestampOption) applyEvent(c EventConfig) EventConfig {
+	c.timestamp = time.Time(o)
+	return c
+}
+
+var _ SpanEventOption = timestampOption{}
+
+// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g.
+// started, stopped, errored).
+func WithTimestamp(t time.Time) SpanEventOption {
+	return timestampOption(t)
+}
+
+type stackTraceOption bool
+
+func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
+	c.stackTrace = bool(o)
+	return c
+}
+func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
+	c.stackTrace = bool(o)
+	return c
+}
+func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
+
+// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false).
+func WithStackTrace(b bool) SpanEndEventOption {
+	return stackTraceOption(b)
+}
+
+// WithLinks adds links to a Span. The links are added to the existing Span
+// links, i.e. this does not overwrite. Links with invalid span context are ignored.
+func WithLinks(links ...Link) SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.links = append(cfg.links, links...)
+		return cfg
+	})
+}
+
+// WithNewRoot specifies that the Span should be treated as a root Span. Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.newRoot = true
+		return cfg
+	})
+}
+
+// WithSpanKind sets the SpanKind of a Span.
+func WithSpanKind(kind SpanKind) SpanStartOption {
+	return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+		cfg.spanKind = kind
+		return cfg
+	})
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.instrumentationVersion = version
+		return cfg
+	})
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+	return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+		config.attrs = attribute.NewSet(attr...)
+		return config
+	})
+}
+
+// WithSchemaURL sets the schema URL for the Tracer.
+func WithSchemaURL(schemaURL string) TracerOption {
+	return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+		cfg.schemaURL = schemaURL
+		return cfg
+	})
+}
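
Putting the options together: given a TracerProvider tp and a context ctx, a
tracer configured with a version and schema URL can start a root server span
like this (a sketch; the schema URL and attribute are illustrative, and
`attribute` is go.opentelemetry.io/otel/attribute):

	tracer := tp.Tracer("instrumentation/package/name",
		trace.WithInstrumentationVersion("0.1.0"),
		trace.WithSchemaURL("https://opentelemetry.io/schemas/1.17.0"),
	)
	ctx, span := tracer.Start(ctx, "operation",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithNewRoot(),
		trace.WithAttributes(attribute.String("peer.service", "backend")),
	)
	defer span.End()
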
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
new file mode 100644
index 000000000..76f9a083c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "context"
+
+type traceContextKeyType int
+
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent with span set as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+	return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext returns a copy of parent with sc as the current
+// Span. The Span implementation that wraps sc is non-recording and performs
+// no operations other than to return sc as the SpanContext from the
+// SpanContext method.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+	return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
+}
+
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
+// as a remote SpanContext and as the current Span. The Span implementation
+// that wraps rsc is non-recording and performs no operations other than to
+// return rsc as the SpanContext from the SpanContext method.
+func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
+	return ContextWithSpanContext(parent, rsc.WithRemote(true))
+}
+
+// SpanFromContext returns the current Span from ctx.
+//
+// If no Span is currently set in ctx an implementation of a Span that
+// performs no operations is returned.
+func SpanFromContext(ctx context.Context) Span {
+	if ctx == nil {
+		return noopSpan{}
+	}
+	if span, ok := ctx.Value(currentSpanKey).(Span); ok {
+		return span
+	}
+	return noopSpan{}
+}
+
+// SpanContextFromContext returns the current Span's SpanContext.
+func SpanContextFromContext(ctx context.Context) SpanContext {
+	return SpanFromContext(ctx).SpanContext()
+}
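
In practice these accessors carry a span across call boundaries. A minimal
sketch (imports of context and this trace package assumed):

	func parent(ctx context.Context, tracer trace.Tracer) {
		ctx, span := tracer.Start(ctx, "parent")
		defer span.End()
		child(ctx) // the span travels inside ctx
	}

	func child(ctx context.Context) {
		// Safe even if no span was set: a no-op span is returned.
		span := trace.SpanFromContext(ctx)
		span.AddEvent("reached child")
	}
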
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
new file mode 100644
index 000000000..ab0346f96
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package trace provides an implementation of the tracing part of the
+OpenTelemetry API.
+
+To participate in distributed traces a Span needs to be created for the
+operation being performed as part of a traced workflow. In its simplest form:
+
+	var tracer trace.Tracer
+
+	func init() {
+		tracer = otel.Tracer("instrumentation/package/name")
+	}
+
+	func operation(ctx context.Context) {
+		var span trace.Span
+		ctx, span = tracer.Start(ctx, "operation")
+		defer span.End()
+		// ...
+	}
+
+A Tracer is unique to the instrumentation and is used to create Spans.
+Instrumentation should be designed to accept a TracerProvider from which it
+can create its own unique Tracer. Alternatively, the registered global
+TracerProvider from the go.opentelemetry.io/otel package can be used as
+a default.
+
+	const (
+		name    = "instrumentation/package/name"
+		version = "0.1.0"
+	)
+
+	type Instrumentation struct {
+		tracer trace.Tracer
+	}
+
+	func NewInstrumentation(tp trace.TracerProvider) *Instrumentation {
+		if tp == nil {
+			tp = otel.TracerProvider()
+		}
+		return &Instrumentation{
+			tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)),
+		}
+	}
+
+	func operation(ctx context.Context, inst *Instrumentation) {
+		var span trace.Span
+		ctx, span = inst.tracer.Start(ctx, "operation")
+		defer span.End()
+		// ...
+	}
+*/
+package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
new file mode 100644
index 000000000..88fcb8161
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+	noopSpan
+
+	sc SpanContext
+}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc }
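
A short sketch of why this type matters for propagation: even with no SDK installed, a remote SpanContext survives a Start call untouched (standalone example, IDs arbitrary):

	package main

	import (
		"context"
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		remote := trace.NewSpanContext(trace.SpanContextConfig{
			TraceID: trace.TraceID{0x0a},
			SpanID:  trace.SpanID{0x0b},
		}).WithRemote(true)
		ctx := trace.ContextWithSpanContext(context.Background(), remote)

		// The noop tracer wraps the incoming identifiers in a
		// nonRecordingSpan, so downstream propagation keeps working
		// even while tracing is disabled.
		_, span := trace.NewNoopTracerProvider().Tracer("example").Start(ctx, "op")
		fmt.Println(span.SpanContext().Equal(remote)) // true
	}
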
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
new file mode 100644
index 000000000..73950f207
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+)
+
+// NewNoopTracerProvider returns an implementation of TracerProvider that
+// performs no operations. The Tracer and Spans created from the returned
+// TracerProvider also perform no operations.
+func NewNoopTracerProvider() TracerProvider {
+	return noopTracerProvider{}
+}
+
+type noopTracerProvider struct{}
+
+var _ TracerProvider = noopTracerProvider{}
+
+// Tracer returns a noop implementation of Tracer.
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+	return noopTracer{}
+}
+
+// noopTracer is an implementation of Tracer that performs no operations.
+type noopTracer struct{}
+
+var _ Tracer = noopTracer{}
+
+// Start carries forward a non-recording Span if one is present in the context;
+// otherwise it returns a no-op Span.
+func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+	span := SpanFromContext(ctx)
+	if _, ok := span.(nonRecordingSpan); !ok {
+		// span is likely already a noopSpan, but let's be sure
+		span = noopSpan{}
+	}
+	return ContextWithSpan(ctx, span), span
+}
+
+// noopSpan is an implementation of Span that performs no operations.
+type noopSpan struct{}
+
+var _ Span = noopSpan{}
+
+// SpanContext returns an empty span context.
+func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
+
+// IsRecording always returns false.
+func (noopSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (noopSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (noopSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (noopSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (noopSpan) End(...SpanEndOption) {}
+
+// RecordError does nothing.
+func (noopSpan) RecordError(error, ...EventOption) {}
+
+// AddEvent does nothing.
+func (noopSpan) AddEvent(string, ...EventOption) {}
+
+// SetName does nothing.
+func (noopSpan) SetName(string) {}
+
+// TracerProvider returns a no-op TracerProvider.
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
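
As a usage sketch, the noop provider makes a reasonable default for instrumented library code; every Start/End pair below is a cheap no-op (names are illustrative):

	package main

	import (
		"context"

		"go.opentelemetry.io/otel/trace"
	)

	// process is instrumented unconditionally; callers that do not care
	// about tracing can pass the noop provider and pay almost nothing.
	func process(ctx context.Context, tp trace.TracerProvider) {
		ctx, span := tp.Tracer("example/worker").Start(ctx, "process")
		defer span.End()

		_ = ctx // would be passed on to downstream calls
	}

	func main() {
		process(context.Background(), trace.NewNoopTracerProvider())
	}
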
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
new file mode 100644
index 000000000..4aa94f79f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -0,0 +1,551 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+)
+
+const (
+	// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+	// with the sampling bit set means the span is sampled.
+	FlagsSampled = TraceFlags(0x01)
+
+	errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+	errInvalidTraceIDLength errorConst = "hex encoded trace-id must have a length of 32"
+	errNilTraceID           errorConst = "trace-id can't be all zero"
+
+	errInvalidSpanIDLength errorConst = "hex encoded span-id must have a length of 16"
+	errNilSpanID           errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+	return string(e)
+}
+
+// TraceID is a unique identity of a trace.
+// nolint:revive // revive complains about stutter of `trace.TraceID`.
+type TraceID [16]byte
+
+var nilTraceID TraceID
+var _ json.Marshaler = nilTraceID
+
+// IsValid checks whether the TraceID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t TraceID) IsValid() bool {
+	return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(t.String())
+}
+
+// String returns the hex string representation form of a TraceID.
+func (t TraceID) String() string {
+	return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var nilSpanID SpanID
+var _ json.Marshaler = nilSpanID
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+	return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.String())
+}
+
+// String returns the hex string representation form of a SpanID.
+func (s SpanID) String() string {
+	return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification.  See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
+func TraceIDFromHex(h string) (TraceID, error) {
+	t := TraceID{}
+	if len(h) != 32 {
+		return t, errInvalidTraceIDLength
+	}
+
+	if err := decodeHex(h, t[:]); err != nil {
+		return t, err
+	}
+
+	if !t.IsValid() {
+		return t, errNilTraceID
+	}
+	return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the w3c trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+	s := SpanID{}
+	if len(h) != 16 {
+		return s, errInvalidSpanIDLength
+	}
+
+	if err := decodeHex(h, s[:]); err != nil {
+		return s, err
+	}
+
+	if !s.IsValid() {
+		return s, errNilSpanID
+	}
+	return s, nil
+}
+
+func decodeHex(h string, b []byte) error {
+	for _, r := range h {
+		switch {
+		case 'a' <= r && r <= 'f':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		default:
+			return errInvalidHexID
+		}
+	}
+
+	decoded, err := hex.DecodeString(h)
+	if err != nil {
+		return err
+	}
+
+	copy(b, decoded)
+	return nil
+}
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns whether the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+	return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive  // sampled is not a control flag.
+	if sampled {
+		return tf | FlagsSampled
+	}
+
+	return tf &^ FlagsSampled
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceFlags
+// as a hex string.
+func (tf TraceFlags) MarshalJSON() ([]byte, error) {
+	return json.Marshal(tf.String())
+}
+
+// String returns the hex string representation form of TraceFlags.
+func (tf TraceFlags) String() string {
+	return hex.EncodeToString([]byte{byte(tf)}[:])
+}
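
Putting the ID and flag types together, a standalone sketch that parses the fields of a W3C traceparent header (the header value below is the example from the spec):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		// traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
		tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
		if err != nil {
			panic(err)
		}
		sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
		if err != nil {
			panic(err)
		}
		flags := trace.TraceFlags(0x01)

		// 4bf92f3577b34da6a3ce929d0e0e4736 00f067aa0ba902b7 true
		fmt.Println(tid, sid, flags.IsSampled())
	}
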
+
+// SpanContextConfig contains mutable fields usable for constructing
+// an immutable SpanContext.
+type SpanContextConfig struct {
+	TraceID    TraceID
+	SpanID     SpanID
+	TraceFlags TraceFlags
+	TraceState TraceState
+	Remote     bool
+}
+
+// NewSpanContext constructs a SpanContext using values from the provided
+// SpanContextConfig.
+func NewSpanContext(config SpanContextConfig) SpanContext {
+	return SpanContext{
+		traceID:    config.TraceID,
+		spanID:     config.SpanID,
+		traceFlags: config.TraceFlags,
+		traceState: config.TraceState,
+		remote:     config.Remote,
+	}
+}
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+	traceID    TraceID
+	spanID     SpanID
+	traceFlags TraceFlags
+	traceState TraceState
+	remote     bool
+}
+
+var _ json.Marshaler = SpanContext{}
+
+// IsValid returns whether the SpanContext is valid. A valid span context has a
+// valid TraceID and SpanID.
+func (sc SpanContext) IsValid() bool {
+	return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+func (sc SpanContext) IsRemote() bool {
+	return sc.remote
+}
+
+// WithRemote returns a copy of sc with the Remote property set to remote.
+func (sc SpanContext) WithRemote(remote bool) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     remote,
+	}
+}
+
+// TraceID returns the TraceID from the SpanContext.
+func (sc SpanContext) TraceID() TraceID {
+	return sc.traceID
+}
+
+// HasTraceID checks if the SpanContext has a valid TraceID.
+func (sc SpanContext) HasTraceID() bool {
+	return sc.traceID.IsValid()
+}
+
+// WithTraceID returns a new SpanContext with the TraceID replaced.
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
+	return SpanContext{
+		traceID:    traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// SpanID returns the SpanID from the SpanContext.
+func (sc SpanContext) SpanID() SpanID {
+	return sc.spanID
+}
+
+// HasSpanID checks if the SpanContext has a valid SpanID.
+func (sc SpanContext) HasSpanID() bool {
+	return sc.spanID.IsValid()
+}
+
+// WithSpanID returns a new SpanContext with the SpanID replaced.
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// TraceFlags returns the flags from the SpanContext.
+func (sc SpanContext) TraceFlags() TraceFlags {
+	return sc.traceFlags
+}
+
+// IsSampled returns whether the sampling bit is set in the SpanContext's TraceFlags.
+func (sc SpanContext) IsSampled() bool {
+	return sc.traceFlags.IsSampled()
+}
+
+// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
+func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: flags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// TraceState returns the TraceState from the SpanContext.
+func (sc SpanContext) TraceState() TraceState {
+	return sc.traceState
+}
+
+// WithTraceState returns a new SpanContext with the TraceState replaced.
+func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: state,
+		remote:     sc.remote,
+	}
+}
+
+// Equal is a predicate that determines whether two SpanContext values are equal.
+func (sc SpanContext) Equal(other SpanContext) bool {
+	return sc.traceID == other.traceID &&
+		sc.spanID == other.spanID &&
+		sc.traceFlags == other.traceFlags &&
+		sc.traceState.String() == other.traceState.String() &&
+		sc.remote == other.remote
+}
+
+// MarshalJSON implements a custom marshal function to encode a SpanContext.
+func (sc SpanContext) MarshalJSON() ([]byte, error) {
+	return json.Marshal(SpanContextConfig{
+		TraceID:    sc.traceID,
+		SpanID:     sc.spanID,
+		TraceFlags: sc.traceFlags,
+		TraceState: sc.traceState,
+		Remote:     sc.remote,
+	})
+}
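
Because SpanContext is immutable, the With* methods return modified copies; a brief standalone sketch (arbitrary IDs):

	package main

	import (
		"encoding/json"
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		sc := trace.NewSpanContext(trace.SpanContextConfig{
			TraceID: trace.TraceID{0x01},
			SpanID:  trace.SpanID{0x02},
		})

		// WithTraceFlags leaves sc untouched and returns a copy.
		sampled := sc.WithTraceFlags(sc.TraceFlags().WithSampled(true))
		fmt.Println(sc.IsSampled(), sampled.IsSampled()) // false true
		fmt.Println(sc.Equal(sampled))                   // false

		// MarshalJSON encodes through SpanContextConfig.
		b, _ := json.Marshal(sampled)
		fmt.Println(string(b)) // hex-encoded fields of the sampled copy
	}
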
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Span interface {
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// AddEvent adds an event with the provided name and options.
+	AddEvent(name string, options ...EventOption)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
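
A sketch of the lifecycle this interface implies, using the noop tracer so the example is self-contained; note that RecordError and SetStatus are deliberately separate steps (the step helper is illustrative):

	package main

	import (
		"context"
		"errors"

		"go.opentelemetry.io/otel/codes"
		"go.opentelemetry.io/otel/trace"
	)

	func operation(ctx context.Context, tracer trace.Tracer) error {
		ctx, span := tracer.Start(ctx, "operation")
		defer span.End()

		if err := step(ctx); err != nil {
			// RecordError only adds an exception event; the span is not
			// marked failed unless the status is set as well.
			span.RecordError(err)
			span.SetStatus(codes.Error, err.Error())
			return err
		}
		span.SetStatus(codes.Ok, "")
		return nil
	}

	func step(context.Context) error { return errors.New("boom") }

	func main() {
		tracer := trace.NewNoopTracerProvider().Tracer("example")
		_ = operation(context.Background(), tracer)
	}
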
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
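
For the batch-processing case above, a small standalone sketch (the attribute key is illustrative, not a semantic convention defined by this patch):

	package main

	import (
		"context"
		"fmt"

		"go.opentelemetry.io/otel/attribute"
		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		// Each message in a batch carries the context of its origin trace.
		sc := trace.NewSpanContext(trace.SpanContextConfig{
			TraceID: trace.TraceID{0x0a},
			SpanID:  trace.SpanID{0x0b},
		})
		msgCtx := trace.ContextWithSpanContext(context.Background(), sc)

		// The batch span would carry one Link per message.
		link := trace.LinkFromContext(msgCtx, attribute.String("message.id", "42"))
		fmt.Println(link.SpanContext.Equal(sc)) // true
	}
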
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value.  This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
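
A one-line sketch of the coercion behaviour:

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		// Invalid kinds collapse to the internal default.
		k := trace.ValidateSpanKind(trace.SpanKindUnspecified)
		fmt.Println(k == trace.SpanKindInternal, k) // true internal
	}
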
+
+// Tracer is the creator of Spans.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Tracer interface {
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: methods may be added to this interface in minor releases.
+type TracerProvider interface {
+	// Tracer returns a unique Tracer scoped to be used by instrumentation code
+	// to trace computational workflows. The scope and identity of that
+	// instrumentation code is uniquely defined by the name and options passed.
+	//
+	// The passed name needs to uniquely identify instrumentation code.
+	// Therefore, it is recommended that name is the Go package name of the
+	// library providing instrumentation (note: not the code being
+	// instrumented). Instrumentation libraries can have multiple versions,
+	// therefore, the WithInstrumentationVersion option should be used to
+	// distinguish these different codebases. Additionally, instrumentation
+	// libraries may sometimes use traces to communicate different domains of
+	// workflow data (i.e. using spans to communicate workflow events only). If
+	// this is the case, the WithScopeAttributes option should be used to
+	// uniquely identify Tracers that handle the different domains of workflow
+	// data.
+	//
+	// If the same name and options are passed multiple times, the same Tracer
+	// will be returned (it is up to the implementation if this will be the
+	// same underlying instance of that Tracer or not). It is not necessary to
+	// call this multiple times with the same name and options to get an
+	// up-to-date Tracer. All implementations will ensure any TracerProvider
+	// configuration changes are propagated to all provided Tracers.
+	//
+	// If name is empty, then an implementation defined default name will be
+	// used instead.
+	//
+	// This method is safe to call concurrently.
+	Tracer(name string, options ...TracerOption) Tracer
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 000000000..ca68a82e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,212 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const (
+	maxListMembers = 32
+
+	listDelimiter = ","
+
+	// based on the W3C Trace Context specification, see
+	// https://www.w3.org/TR/trace-context-1/#tracestate-header
+	noTenantKeyFormat   = `[a-z][_0-9a-z\-\*\/]{0,255}`
+	withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
+	valueFormat         = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+
+	errInvalidKey    errorConst = "invalid tracestate key"
+	errInvalidValue  errorConst = "invalid tracestate value"
+	errInvalidMember errorConst = "invalid tracestate list-member"
+	errMemberNumber  errorConst = "too many list-members in tracestate"
+	errDuplicate     errorConst = "duplicate list-member in tracestate"
+)
+
+var (
+	keyRe    = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`)
+	valueRe  = regexp.MustCompile(`^(` + valueFormat + `)$`)
+	memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
+)
+
+type member struct {
+	Key   string
+	Value string
+}
+
+func newMember(key, value string) (member, error) {
+	if !keyRe.MatchString(key) {
+		return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+	}
+	if !valueRe.MatchString(value) {
+		return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
+	}
+	return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+	matches := memberRe.FindStringSubmatch(m)
+	if len(matches) != 5 {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+
+	return member{
+		Key:   matches[1],
+		Value: matches[4],
+	}, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+	return fmt.Sprintf("%s=%s", m.Key, m.Value)
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+// immutable list consisting of key/value pairs; each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+	// list is the members in order.
+	list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
+func ParseTraceState(tracestate string) (TraceState, error) {
+	if tracestate == "" {
+		return TraceState{}, nil
+	}
+
+	wrapErr := func(err error) error {
+		return fmt.Errorf("failed to parse tracestate: %w", err)
+	}
+
+	var members []member
+	found := make(map[string]struct{})
+	for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+		if len(memberStr) == 0 {
+			continue
+		}
+
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return TraceState{}, wrapErr(err)
+		}
+
+		if _, ok := found[m.Key]; ok {
+			return TraceState{}, wrapErr(errDuplicate)
+		}
+		found[m.Key] = struct{}{}
+
+		members = append(members, m)
+		if n := len(members); n > maxListMembers {
+			return TraceState{}, wrapErr(errMemberNumber)
+		}
+	}
+
+	return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+	members := make([]string, len(ts.list))
+	for i, m := range ts.list {
+		members[i] = m.String()
+	}
+	return strings.Join(members, listDelimiter)
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+
+	cTS := ts.Delete(key)
+	if cTS.Len()+1 <= maxListMembers {
+		cTS.list = append(cTS.list, member{})
+	}
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], cTS.list)
+	cTS.list[0] = m
+
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+	return len(ts.list)
+}
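
A standalone sketch using list-member values from the W3C tracestate examples:

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		ts, err := trace.ParseTraceState("congo=t61rcWkgMzE,rojo=00f067aa0ba902b7")
		if err != nil {
			panic(err)
		}
		fmt.Println(ts.Len(), ts.Get("rojo")) // 2 00f067aa0ba902b7

		// Insert validates the pair and moves it to the front of the list.
		ts, err = ts.Insert("congo", "ucfJifl5GOE")
		if err != nil {
			panic(err)
		}
		fmt.Println(ts.String()) // congo=ucfJifl5GOE,rojo=00f067aa0ba902b7
	}
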
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
new file mode 100644
index 000000000..dbb61a422
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+cd "$(dirname "$0")"
+TOOLS_DIR=$(pwd)/.tools
+
+# Under `set -u` an unset GOPATH would abort the expansion below, so use a
+# default-empty expansion to reach the friendly error message instead.
+if [ -z "${GOPATH:-}" ] ; then
+	printf "GOPATH is not defined.\n"
+	exit 1
+fi
+
+if [ ! -d "${GOPATH}" ] ; then
+	printf "GOPATH %s is invalid\n" "${GOPATH}"
+	exit 1
+fi
+
+# Pre-requisites
+if ! git diff --quiet; then
+	git status
+	printf "\n\nError: working tree is not clean\n"
+	exit 1
+fi
+
+if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
+	printf "$(git log -1)"
+	printf "\n\nError: HEAD is not pointing to a tagged version"
+fi
+
+make ${TOOLS_DIR}/gojq
+
+DIR_TMP="${GOPATH}/src/oteltmp/"
+rm -rf $DIR_TMP
+mkdir -p $DIR_TMP
+
+printf "Copy examples to ${DIR_TMP}\n"
+cp -a ./example ${DIR_TMP}
+
+# Update go.mod files
+printf "Update go.mod: rename module and remove replace\n"
+
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
+
+for dir in $PACKAGE_DIRS; do
+	printf "  Update go.mod for $dir\n"
+	(cd "${DIR_TMP}/${dir}" && \
+	 # replaces is ("mod1" "mod2" …)
+	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
+	 # strip double quotes
+	 replaces=("${replaces[@]%\"}") && \
+	 replaces=("${replaces[@]#\"}") && \
+	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
+	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
+	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
+	 go mod tidy)
+done
+printf "Update done:\n\n"
+
+# Build directories that contain main package. These directories are different than
+# directories that contain go.mod files.
+printf "Build examples:\n"
+EXAMPLES=$(./get_main_pkgs.sh ./example)
+for ex in $EXAMPLES; do
+	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
+	(cd "${DIR_TMP}/${ex}" && \
+	 go build .)
+done
+
+# Cleanup
+printf "Remove copied files.\n"
+rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 000000000..0e8e5e023
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+// Version is the current release version of OpenTelemetry in use.
+func Version() string {
+	return "1.14.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
new file mode 100644
index 000000000..40df1fae4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -0,0 +1,57 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module-sets:
+  stable-v1:
+    version: v1.14.0
+    modules:
+      - go.opentelemetry.io/otel
+      - go.opentelemetry.io/otel/bridge/opentracing
+      - go.opentelemetry.io/otel/bridge/opentracing/test
+      - go.opentelemetry.io/otel/example/fib
+      - go.opentelemetry.io/otel/example/jaeger
+      - go.opentelemetry.io/otel/example/namedtracer
+      - go.opentelemetry.io/otel/example/otel-collector
+      - go.opentelemetry.io/otel/example/passthrough
+      - go.opentelemetry.io/otel/example/zipkin
+      - go.opentelemetry.io/otel/exporters/jaeger
+      - go.opentelemetry.io/otel/exporters/zipkin
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+      - go.opentelemetry.io/otel/exporters/otlp/internal/retry
+      - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+      - go.opentelemetry.io/otel/trace
+      - go.opentelemetry.io/otel/sdk
+  experimental-metrics:
+    version: v0.37.0
+    modules:
+      - go.opentelemetry.io/otel/example/opencensus
+      - go.opentelemetry.io/otel/example/prometheus
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+      - go.opentelemetry.io/otel/exporters/prometheus
+      - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+      - go.opentelemetry.io/otel/metric
+      - go.opentelemetry.io/otel/sdk/metric
+      - go.opentelemetry.io/otel/bridge/opencensus
+      - go.opentelemetry.io/otel/bridge/opencensus/test
+      - go.opentelemetry.io/otel/example/view
+  experimental-schema:
+    version: v0.0.4
+    modules:
+      - go.opentelemetry.io/otel/schema
+excluded-modules:
+  - go.opentelemetry.io/otel/internal/tools
diff --git a/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
new file mode 100644
index 000000000..fc285c089
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
@@ -0,0 +1,367 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.17.3
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportTraceServiceRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+	*x = ExportTraceServiceRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
+
+type ExportTraceServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
+	PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+	*x = ExportTraceServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportTracePartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected spans.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportTracePartialSuccess) Reset() {
+	*x = ExportTracePartialSuccess{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTracePartialSuccess) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTracePartialSuccess) ProtoMessage() {}
+
+func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 {
+	if x != nil {
+		return x.RejectedSpans
+	}
+	return 0
+}
+
+func (x *ExportTracePartialSuccess) GetErrorMessage() string {
+	if x != nil {
+		return x.ErrorMessage
+	}
+	return ""
+}
+
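+// Illustrative sketch (not part of the generated API): how a caller might act
+// on the partial-success semantics documented above. The function name is
+// hypothetical; only the generated types and getters are assumed.
+func examplePartialSuccessCheck(resp *ExportTraceServiceResponse) (rejected int64, warning string) {
+	ps := resp.GetPartialSuccess()
+	if ps == nil {
+		// An absent partial_success is equivalent to full success.
+		return 0, ""
+	}
+	// rejected_spans == 0 with a non-empty error_message conveys a warning on
+	// an otherwise fully accepted request.
+	return ps.GetRejectedSpans(), ps.GetErrorMessage()
+}
+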
+var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{
+	0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63,
+	0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
+	0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f,
+	0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+	0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+	0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22,
+	0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a,
+	0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+	0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+	0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+	0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74,
+	0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78,
+	0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+	0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63,
+	0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+	0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23,
+	0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73,
+	0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12,
+	0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54,
+	0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+	0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+	0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f,
+	0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72,
+	0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c,
+	0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02,
+	0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54,
+	0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{
+	(*ExportTraceServiceRequest)(nil),  // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+	(*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+	(*ExportTracePartialSuccess)(nil),  // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+	(*v1.ResourceSpans)(nil),           // 3: opentelemetry.proto.trace.v1.ResourceSpans
+}
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{
+	3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+	2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+	0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+	1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+	3, // [3:4] is the sub-list for method output_type
+	2, // [2:3] is the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() }
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() {
+	if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportTraceServiceRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportTraceServiceResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExportTracePartialSuccess); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   3,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil
+	file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
new file mode 100644
index 000000000..d142c2a44
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportTraceServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+
+}
+
+func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var protoReq ExportTraceServiceRequest
+	var metadata runtime.ServerMetadata
+
+	newReader, berr := utilities.IOReaderFactory(req.Body)
+	if berr != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+	}
+	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+	}
+
+	msg, err := server.Export(ctx, &protoReq)
+	return msg, metadata, err
+
+}
+
+// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux".
+// UnaryRPC     :call TraceServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead.
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
+
+	mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		var stream runtime.ServerTransportStream
+		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams)
+		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+// RegisterTraceServiceHandlerFromEndpoint is the same as RegisterTraceServiceHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+	conn, err := grpc.Dial(endpoint, opts...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+			return
+		}
+		go func() {
+			<-ctx.Done()
+			if cerr := conn.Close(); cerr != nil {
+				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+			}
+		}()
+	}()
+
+	return RegisterTraceServiceHandler(ctx, mux, conn)
+}
+
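+// Illustrative sketch (not part of the generated API): serving the OTLP/HTTP
+// route "/v1/trace" by pairing a runtime.ServeMux with the registration helper
+// above. The function name and listen address are hypothetical.
+func exampleServeGateway(ctx context.Context, grpcEndpoint string, opts []grpc.DialOption) error {
+	mux := runtime.NewServeMux()
+	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, grpcEndpoint, opts); err != nil {
+		return err
+	}
+	// POST bodies on /v1/trace are decoded and forwarded to the gRPC Export RPC.
+	return http.ListenAndServe(":8080", mux)
+}
+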
+// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+	return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
+}
+
+// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "TraceServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.), then it is up to the passed-in
+// "TraceServiceClient" to call the correct interceptors.
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
+
+	mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
+)
+
+var (
+	forward_TraceService_Export_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
new file mode 100644
index 000000000..c21f2cb47
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc             v3.17.3
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type TraceServiceClient interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
+}
+
+type traceServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
+	return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
+	out := new(ExportTraceServiceResponse)
+	err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
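+// Illustrative sketch (not part of the generated API): a minimal unary call,
+// assuming an already-established connection. Per the interface comment above,
+// the RPC is typically kept alive for the life of the application. The
+// function name is hypothetical.
+func exampleExport(ctx context.Context, cc grpc.ClientConnInterface, req *ExportTraceServiceRequest) error {
+	client := NewTraceServiceClient(cc)
+	_, err := client.Export(ctx, req)
+	return err
+}
+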
+// TraceServiceServer is the server API for TraceService service.
+// All implementations must embed UnimplementedTraceServiceServer
+// for forward compatibility
+type TraceServiceServer interface {
+	// For performance reasons, it is recommended to keep this RPC
+	// alive for the entire life of the application.
+	Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
+	mustEmbedUnimplementedTraceServiceServer()
+}
+
+// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct {
+}
+
+func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {}
+
+// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to TraceServiceServer will
+// result in compilation errors.
+type UnsafeTraceServiceServer interface {
+	mustEmbedUnimplementedTraceServiceServer()
+}
+
+func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) {
+	s.RegisterService(&TraceService_ServiceDesc, srv)
+}
+
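+// Illustrative sketch (not part of the generated API): a server implementation
+// embeds UnimplementedTraceServiceServer so that methods added to the service
+// later do not break compilation. Names here are hypothetical.
+type exampleCollector struct {
+	UnimplementedTraceServiceServer
+}
+
+func (exampleCollector) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
+	// Accept everything; an empty response signals full success.
+	return &ExportTraceServiceResponse{}, nil
+}
+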
+func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ExportTraceServiceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TraceServiceServer).Export(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var TraceService_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
+	HandlerType: (*TraceServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Export",
+			Handler:    _TraceService_Export_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
new file mode 100644
index 000000000..8502e607b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -0,0 +1,627 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.17.3
+// source: opentelemetry/proto/common/v1/common.proto
+
+package v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+type AnyValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The value is one of the listed fields. It is valid for all values to be unspecified,
+	// in which case this AnyValue is considered to be "empty".
+	//
+	// Types that are assignable to Value:
+	//	*AnyValue_StringValue
+	//	*AnyValue_BoolValue
+	//	*AnyValue_IntValue
+	//	*AnyValue_DoubleValue
+	//	*AnyValue_ArrayValue
+	//	*AnyValue_KvlistValue
+	//	*AnyValue_BytesValue
+	Value isAnyValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *AnyValue) Reset() {
+	*x = AnyValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *AnyValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AnyValue) ProtoMessage() {}
+
+func (x *AnyValue) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead.
+func (*AnyValue) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *AnyValue) GetValue() isAnyValue_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (x *AnyValue) GetStringValue() string {
+	if x, ok := x.GetValue().(*AnyValue_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *AnyValue) GetBoolValue() bool {
+	if x, ok := x.GetValue().(*AnyValue_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *AnyValue) GetIntValue() int64 {
+	if x, ok := x.GetValue().(*AnyValue_IntValue); ok {
+		return x.IntValue
+	}
+	return 0
+}
+
+func (x *AnyValue) GetDoubleValue() float64 {
+	if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok {
+		return x.DoubleValue
+	}
+	return 0
+}
+
+func (x *AnyValue) GetArrayValue() *ArrayValue {
+	if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok {
+		return x.ArrayValue
+	}
+	return nil
+}
+
+func (x *AnyValue) GetKvlistValue() *KeyValueList {
+	if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok {
+		return x.KvlistValue
+	}
+	return nil
+}
+
+func (x *AnyValue) GetBytesValue() []byte {
+	if x, ok := x.GetValue().(*AnyValue_BytesValue); ok {
+		return x.BytesValue
+	}
+	return nil
+}
+
+type isAnyValue_Value interface {
+	isAnyValue_Value()
+}
+
+type AnyValue_StringValue struct {
+	StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AnyValue_BoolValue struct {
+	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AnyValue_IntValue struct {
+	IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AnyValue_DoubleValue struct {
+	DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type AnyValue_ArrayValue struct {
+	ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
+}
+
+type AnyValue_KvlistValue struct {
+	KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
+}
+
+type AnyValue_BytesValue struct {
+	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+func (*AnyValue_StringValue) isAnyValue_Value() {}
+
+func (*AnyValue_BoolValue) isAnyValue_Value() {}
+
+func (*AnyValue_IntValue) isAnyValue_Value() {}
+
+func (*AnyValue_DoubleValue) isAnyValue_Value() {}
+
+func (*AnyValue_ArrayValue) isAnyValue_Value() {}
+
+func (*AnyValue_KvlistValue) isAnyValue_Value() {}
+
+func (*AnyValue_BytesValue) isAnyValue_Value() {}
+
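+// Illustrative sketch (not part of the generated API): populating the oneof
+// Value field goes through the generated wrapper types above. The helper name
+// is hypothetical; only generated types are assumed.
+func exampleStringAttribute(key, val string) *KeyValue {
+	return &KeyValue{
+		Key:   key,
+		Value: &AnyValue{Value: &AnyValue_StringValue{StringValue: val}},
+	}
+}
+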
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Array of values. The array may be empty (contain 0 elements).
+	Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ArrayValue) Reset() {
+	*x = ArrayValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ArrayValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ArrayValue) ProtoMessage() {}
+
+func (x *ArrayValue) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead.
+func (*ArrayValue) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ArrayValue) GetValues() []*AnyValue {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+type KeyValueList struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A collection of key-value pairs. The list may be empty (may
+	// contain 0 elements).
+	// The keys MUST be unique (it is not allowed to have more than one
+	// value with the same key).
+	Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *KeyValueList) Reset() {
+	*x = KeyValueList{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KeyValueList) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValueList) ProtoMessage() {}
+
+func (x *KeyValueList) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead.
+func (*KeyValueList) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *KeyValueList) GetValues() []*KeyValue {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+type KeyValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Key   string    `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KeyValue) Reset() {
+	*x = KeyValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *KeyValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValue) ProtoMessage() {}
+
+func (x *KeyValue) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *KeyValue) GetKey() string {
+	if x != nil {
+		return x.Key
+	}
+	return ""
+}
+
+func (x *KeyValue) GetValue() *AnyValue {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// InstrumentationScope is a message representing the instrumentation scope information
+// such as the fully qualified name and version.
+type InstrumentationScope struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An empty instrumentation scope name means the name is unknown.
+	Name                   string      `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version                string      `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	Attributes             []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	DroppedAttributesCount uint32      `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *InstrumentationScope) Reset() {
+	*x = InstrumentationScope{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *InstrumentationScope) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InstrumentationScope) ProtoMessage() {}
+
+func (x *InstrumentationScope) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead.
+func (*InstrumentationScope) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *InstrumentationScope) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *InstrumentationScope) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+func (x *InstrumentationScope) GetAttributes() []*KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
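+// Illustrative sketch (not part of the generated API): an InstrumentationScope
+// identifying the library that produced some telemetry, as described above.
+// The variable name, library path, and version are hypothetical.
+var exampleScope = &InstrumentationScope{
+	Name:    "example.com/mylib/tracing",
+	Version: "0.0.1",
+}
+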
+var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{
+	0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08,
+	0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69,
+	0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+	0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a,
+	0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d,
+	0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a,
+	0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20,
+	0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+	0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c,
+	0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d,
+	0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f,
+	0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a,
+	0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a,
+	0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+	0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+	0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b,
+	0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+	0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14,
+	0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+	0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+	0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+	0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+	0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+	0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64,
+	0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+	0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64,
+	0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+	0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f,
+	0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+	0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc
+)
+
+func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_common_v1_common_proto_rawDescData
+}
+
+var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{
+	(*AnyValue)(nil),             // 0: opentelemetry.proto.common.v1.AnyValue
+	(*ArrayValue)(nil),           // 1: opentelemetry.proto.common.v1.ArrayValue
+	(*KeyValueList)(nil),         // 2: opentelemetry.proto.common.v1.KeyValueList
+	(*KeyValue)(nil),             // 3: opentelemetry.proto.common.v1.KeyValue
+	(*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope
+}
+var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{
+	1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue
+	2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList
+	0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue
+	3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue
+	0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue
+	3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_common_v1_common_proto_init() }
+func file_opentelemetry_proto_common_v1_common_proto_init() {
+	if File_opentelemetry_proto_common_v1_common_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*AnyValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ArrayValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*KeyValueList); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*KeyValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*InstrumentationScope); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{
+		(*AnyValue_StringValue)(nil),
+		(*AnyValue_BoolValue)(nil),
+		(*AnyValue_IntValue)(nil),
+		(*AnyValue_DoubleValue)(nil),
+		(*AnyValue_ArrayValue)(nil),
+		(*AnyValue_KvlistValue)(nil),
+		(*AnyValue_BytesValue)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_common_v1_common_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_common_v1_common_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_common_v1_common_proto = out.File
+	file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil
+	file_opentelemetry_proto_common_v1_common_proto_goTypes = nil
+	file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
new file mode 100644
index 000000000..bcc1060e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -0,0 +1,193 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.17.3
+// source: opentelemetry/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/common/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource information.
+type Resource struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Set of attributes that describe the resource.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+	// no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Resource) Reset() {
+	*x = Resource{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Resource) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetAttributes() []*v1.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Resource) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
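+// Illustrative sketch (not part of the generated API): a Resource carrying a
+// single service.name attribute, built from the common/v1 types imported above
+// as "v1". The variable name and attribute value are hypothetical.
+var exampleResource = &Resource{
+	Attributes: []*v1.KeyValue{{
+		Key:   "service.name",
+		Value: &v1.AnyValue{Value: &v1.AnyValue_StringValue{StringValue: "my-service"}},
+	}},
+}
+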
+var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
+	0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+	0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76,
+	0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31,
+	0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01,
+	0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+	0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+	0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01,
+	0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+	0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+	0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+	0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc
+)
+
+func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData
+}
+
+var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{
+	(*Resource)(nil),    // 0: opentelemetry.proto.resource.v1.Resource
+	(*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{
+	1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() }
+func file_opentelemetry_proto_resource_v1_resource_proto_init() {
+	if File_opentelemetry_proto_resource_v1_resource_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Resource); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_resource_v1_resource_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs,
+		MessageInfos:      file_opentelemetry_proto_resource_v1_resource_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_resource_v1_resource_proto = out.File
+	file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil
+	file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil
+	file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
new file mode 100644
index 000000000..499a43d77
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -0,0 +1,1140 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.17.3
+// source: opentelemetry/proto/trace/v1/trace.proto
+
+package v1
+
+import (
+	v11 "go.opentelemetry.io/proto/otlp/common/v1"
+	v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// SpanKind is the type of span. It can be used to specify additional
+// relationships between spans beyond the parent/child relationship.
+type Span_SpanKind int32
+
+const (
+	// Unspecified. Do NOT use as default.
+	// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
+	Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
+	// Indicates that the span represents an internal operation within an application,
+	// as opposed to an operation happening at the boundaries. Default value.
+	Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
+	// Indicates that the span covers server-side handling of an RPC or other
+	// remote network request.
+	Span_SPAN_KIND_SERVER Span_SpanKind = 2
+	// Indicates that the span describes a request to some remote service.
+	Span_SPAN_KIND_CLIENT Span_SpanKind = 3
+	// Indicates that the span describes a producer sending a message to a broker.
+	// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+	// between producer and consumer spans. A PRODUCER span ends when the message is accepted
+	// by the broker, even though the logical processing of the message might span a much longer time.
+	Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
+	// Indicates that the span describes a consumer receiving a message from a broker.
+	// Like the PRODUCER kind, there is often no direct critical path latency relationship
+	// between producer and consumer spans.
+	Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
+)
+
+// Enum value maps for Span_SpanKind.
+var (
+	Span_SpanKind_name = map[int32]string{
+		0: "SPAN_KIND_UNSPECIFIED",
+		1: "SPAN_KIND_INTERNAL",
+		2: "SPAN_KIND_SERVER",
+		3: "SPAN_KIND_CLIENT",
+		4: "SPAN_KIND_PRODUCER",
+		5: "SPAN_KIND_CONSUMER",
+	}
+	Span_SpanKind_value = map[string]int32{
+		"SPAN_KIND_UNSPECIFIED": 0,
+		"SPAN_KIND_INTERNAL":    1,
+		"SPAN_KIND_SERVER":      2,
+		"SPAN_KIND_CLIENT":      3,
+		"SPAN_KIND_PRODUCER":    4,
+		"SPAN_KIND_CONSUMER":    5,
+	}
+)
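+
+// Usage sketch (illustrative, not part of the generated API): the paired
+// maps permit symmetric lookups between a kind's wire number and its
+// symbolic name:
+//
+//	name := Span_SpanKind_name[int32(Span_SPAN_KIND_SERVER)]      // "SPAN_KIND_SERVER"
+//	kind := Span_SpanKind(Span_SpanKind_value["SPAN_KIND_CLIENT"]) // Span_SPAN_KIND_CLIENT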
+
+func (x Span_SpanKind) Enum() *Span_SpanKind {
+	p := new(Span_SpanKind)
+	*p = x
+	return p
+}
+
+func (x Span_SpanKind) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (Span_SpanKind) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x Span_SpanKind) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_SpanKind.Descriptor instead.
+func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type Status_StatusCode int32
+
+const (
+	// The default status.
+	Status_STATUS_CODE_UNSET Status_StatusCode = 0
+	// The Span has been validated by an application developer or operator to have
+	// completed successfully.
+	Status_STATUS_CODE_OK Status_StatusCode = 1
+	// The Span contains an error.
+	Status_STATUS_CODE_ERROR Status_StatusCode = 2
+)
+
+// Enum value maps for Status_StatusCode.
+var (
+	Status_StatusCode_name = map[int32]string{
+		0: "STATUS_CODE_UNSET",
+		1: "STATUS_CODE_OK",
+		2: "STATUS_CODE_ERROR",
+	}
+	Status_StatusCode_value = map[string]int32{
+		"STATUS_CODE_UNSET": 0,
+		"STATUS_CODE_OK":    1,
+		"STATUS_CODE_ERROR": 2,
+	}
+)
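+
+// Usage sketch (illustrative): a span that finished with an error would carry
+// both the code and a developer-facing message:
+//
+//	st := &Status{Code: Status_STATUS_CODE_ERROR, Message: "upstream timeout"}
+//	_ = Status_StatusCode_name[int32(st.Code)] // "STATUS_CODE_ERROR"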
+
+func (x Status_StatusCode) Enum() *Status_StatusCode {
+	p := new(Status_StatusCode)
+	*p = x
+	return p
+}
+
+func (x Status_StatusCode) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor {
+	return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+}
+
+func (Status_StatusCode) Type() protoreflect.EnumType {
+	return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
+}
+
+func (x Status_StatusCode) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Status_StatusCode.Descriptor instead.
+func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0}
+}
+
+// TracesData represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// this message does not carry any "control" or "metadata" fields specific to
+// the OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type TracesData struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding it further, and in that case this
+	// array will contain multiple elements.
+	ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *TracesData) Reset() {
+	*x = TracesData{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TracesData) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TracesData) ProtoMessage() {}
+
+func (x *TracesData) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TracesData.ProtoReflect.Descriptor instead.
+func (*TracesData) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TracesData) GetResourceSpans() []*ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
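+
+// Construction sketch (illustrative; v1 and v11 are the resource/v1 and
+// common/v1 packages imported above):
+//
+//	td := &TracesData{
+//		ResourceSpans: []*ResourceSpans{{
+//			Resource:   &v1.Resource{},
+//			ScopeSpans: []*ScopeSpans{{Spans: []*Span{{Name: "GET /users"}}}},
+//		}},
+//	}
+//	_ = len(td.GetResourceSpans()) // 1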
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The resource for the spans in this message.
+	// If this field is not set then no resource info is known.
+	Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+	// A list of ScopeSpans that originate from a resource.
+	ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_spans" field, whose entries have their own schema_url field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceSpans) Reset() {
+	*x = ResourceSpans{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceSpans) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceSpans) ProtoMessage() {}
+
+func (x *ResourceSpans) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead.
+func (*ResourceSpans) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceSpans) GetResource() *v1.Resource {
+	if x != nil {
+		return x.Resource
+	}
+	return nil
+}
+
+func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans {
+	if x != nil {
+		return x.ScopeSpans
+	}
+	return nil
+}
+
+func (x *ResourceSpans) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The instrumentation scope information for the spans in this message.
+	// Semantically, when InstrumentationScope isn't set, it is equivalent to
+	// an empty instrumentation scope name (unknown).
+	Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+	// A list of Spans that originate from an instrumentation scope.
+	Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+	// This schema_url applies to all spans and span events in the "spans" field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeSpans) Reset() {
+	*x = ScopeSpans{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ScopeSpans) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeSpans) ProtoMessage() {}
+
+func (x *ScopeSpans) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead.
+func (*ScopeSpans) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeSpans) GetScope() *v11.InstrumentationScope {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *ScopeSpans) GetSpans() []*Span {
+	if x != nil {
+		return x.Spans
+	}
+	return nil
+}
+
+func (x *ScopeSpans) GetSchemaUrl() string {
+	if x != nil {
+		return x.SchemaUrl
+	}
+	return ""
+}
+
+// A Span represents a single operation performed by a single component of the system.
+//
+// The next available field id is 17.
+type Span struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A unique identifier for a trace. All spans from the same trace share
+	// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
+	// is considered invalid.
+	//
+	// This field is semantically required. Receivers should generate a new
+	// random trace_id if an empty or invalid trace_id was received.
+	//
+	// This field is required.
+	TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+	// A unique identifier for a span within a trace, assigned when the span
+	// is created. The ID is an 8-byte array. An ID with all zeroes is considered
+	// invalid.
+	//
+	// This field is semantically required. Receivers should generate a new
+	// random span_id if an empty or invalid span_id was received.
+	//
+	// This field is required.
+	SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// trace_state conveys information about request position in multiple distributed tracing graphs.
+	// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+	// See also https://github.com/w3c/distributed-tracing for more details about this field.
+	TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+	// The `span_id` of this span's parent span. If this is a root span, then this
+	// field must be empty. The ID is an 8-byte array.
+	ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+	// A description of the span's operation.
+	//
+	// For example, the name can be a qualified method name or a file name
+	// and a line number where the operation is called. A best practice is to use
+	// the same display name at the same call point in an application.
+	// This makes it easier to correlate spans in different traces.
+	//
+	// This field is semantically required to be set to a non-empty string.
+	// An empty value is equivalent to an unknown span name.
+	//
+	// This field is required.
+	Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+	// Distinguishes between spans generated in a particular context. For example,
+	// two spans with the same name may be distinguished using `CLIENT` (caller)
+	// and `SERVER` (callee) to identify queueing latency associated with the span.
+	Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+	// start_time_unix_nano is the start time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution starts. On the server side, this
+	// is the time when the server's application handler starts running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// end_time_unix_nano is the end time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution ends. On the server side, this
+	// is the time when the server application handler stops running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+	//
+	// This field is semantically required and it is expected that end_time >= start_time.
+	EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
+	// attributes is a collection of key/value pairs. Note, global attributes
+	// like server name can be set using the resource API. Examples of attributes:
+	//
+	//     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+	//     "/http/server_latency": 300
+	//     "abc.com/myattribute": true
+	//     "abc.com/score": 10.239
+	//
+	// The OpenTelemetry API specification further restricts the allowed value types:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of attributes that were discarded. Attributes
+	// can be discarded because their keys are too long or because there are too many
+	// attributes. If this value is 0, then no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+	// events is a collection of Event items.
+	Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+	// dropped_events_count is the number of dropped events. If the value is 0, then no
+	// events were dropped.
+	DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
+	// links is a collection of Links, which are references from this span to a span
+	// in the same or different trace.
+	Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
+	// dropped_links_count is the number of dropped links after the maximum size was
+	// enforced. If this value is 0, then no links were dropped.
+	DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+	// An optional final status for this span. Semantically when Status isn't set, it means
+	// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+	Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"`
+}
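+
+// Population sketch (illustrative values only): the semantically required
+// fields are trace_id (16 bytes), span_id (8 bytes), name, and the two
+// timestamps:
+//
+//	span := &Span{
+//		TraceId:           make([]byte, 16), // all zeroes is invalid; use random bytes in practice
+//		SpanId:            make([]byte, 8),
+//		Name:              "db.query",
+//		Kind:              Span_SPAN_KIND_CLIENT,
+//		StartTimeUnixNano: 1_700_000_000_000_000_000,
+//		EndTimeUnixNano:   1_700_000_000_050_000_000,
+//	}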
+
+func (x *Span) Reset() {
+	*x = Span{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Span) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span) ProtoMessage() {}
+
+func (x *Span) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span.ProtoReflect.Descriptor instead.
+func (*Span) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Span) GetTraceId() []byte {
+	if x != nil {
+		return x.TraceId
+	}
+	return nil
+}
+
+func (x *Span) GetSpanId() []byte {
+	if x != nil {
+		return x.SpanId
+	}
+	return nil
+}
+
+func (x *Span) GetTraceState() string {
+	if x != nil {
+		return x.TraceState
+	}
+	return ""
+}
+
+func (x *Span) GetParentSpanId() []byte {
+	if x != nil {
+		return x.ParentSpanId
+	}
+	return nil
+}
+
+func (x *Span) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Span) GetKind() Span_SpanKind {
+	if x != nil {
+		return x.Kind
+	}
+	return Span_SPAN_KIND_UNSPECIFIED
+}
+
+func (x *Span) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *Span) GetEndTimeUnixNano() uint64 {
+	if x != nil {
+		return x.EndTimeUnixNano
+	}
+	return 0
+}
+
+func (x *Span) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Span) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+func (x *Span) GetEvents() []*Span_Event {
+	if x != nil {
+		return x.Events
+	}
+	return nil
+}
+
+func (x *Span) GetDroppedEventsCount() uint32 {
+	if x != nil {
+		return x.DroppedEventsCount
+	}
+	return 0
+}
+
+func (x *Span) GetLinks() []*Span_Link {
+	if x != nil {
+		return x.Links
+	}
+	return nil
+}
+
+func (x *Span) GetDroppedLinksCount() uint32 {
+	if x != nil {
+		return x.DroppedLinksCount
+	}
+	return 0
+}
+
+func (x *Span) GetStatus() *Status {
+	if x != nil {
+		return x.Status
+	}
+	return nil
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A developer-facing human readable error message.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	// The status code.
+	Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
+}
+
+func (x *Status) Reset() {
+	*x = Status{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Status) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Status) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+func (x *Status) GetCode() Status_StatusCode {
+	if x != nil {
+		return x.Code
+	}
+	return Status_STATUS_CODE_UNSET
+}
+
+// Event is a time-stamped annotation of the span, consisting of a user-supplied
+// text description and key-value pairs.
+type Span_Event struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// time_unix_nano is the time the event occurred.
+	TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// name of the event.
+	// This field is semantically required to be set to a non-empty string.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the event.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Span_Event) Reset() {
+	*x = Span_Event{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Span_Event) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Event) ProtoMessage() {}
+
+func (x *Span_Event) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead.
+func (*Span_Event) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *Span_Event) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *Span_Event) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Span_Event) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Span_Event) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A unique identifier of a trace that this linked span is part of. The ID is a
+	// 16-byte array.
+	TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+	// A unique identifier for the linked span. The ID is an 8-byte array.
+	SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// The trace_state associated with the link.
+	TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+	// attributes is a collection of attribute key/value pairs on the link.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Span_Link) Reset() {
+	*x = Span_Link{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Span_Link) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Link) ProtoMessage() {}
+
+func (x *Span_Link) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead.
+func (*Span_Link) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *Span_Link) GetTraceId() []byte {
+	if x != nil {
+		return x.TraceId
+	}
+	return nil
+}
+
+func (x *Span_Link) GetSpanId() []byte {
+	if x != nil {
+		return x.SpanId
+	}
+	return nil
+}
+
+func (x *Span_Link) GetTraceState() string {
+	if x != nil {
+		return x.TraceState
+	}
+	return ""
+}
+
+func (x *Span_Link) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Span_Link) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
+
+var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
+	0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74,
+	0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e,
+	0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+	0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61,
+	0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73,
+	0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75,
+	0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+	0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+	0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+	0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+	0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a,
+	0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+	0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
+	0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+	0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+	0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+	0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73,
+	0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05,
+	0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
+	0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
+	0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+	0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e,
+	0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49,
+	0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61,
+	0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61,
+	0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65,
+	0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04,
+	0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
+	0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53,
+	0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a,
+	0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78,
+	0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61,
+	0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b,
+	0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f,
+	0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54,
+	0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61,
+	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+	0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
+	0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f,
+	0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41,
+	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40,
+	0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
+	0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70,
+	0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
+	0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e,
+	0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12,
+	0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75,
+	0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+	0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+	0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e,
+	0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11,
+	0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e,
+	0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+	0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a,
+	0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d,
+	0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+	0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18,
+	0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+	0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16,
+	0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+	0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12,
+	0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70,
+	0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61,
+	0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61,
+	0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53,
+	0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+	0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
+	0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
+	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a,
+	0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+	0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52,
+	0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+	0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e,
+	0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e,
+	0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+	0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54,
+	0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+	0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a,
+	0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e,
+	0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
+	0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53,
+	0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45,
+	0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61,
+	0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a,
+	0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53,
+	0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54,
+	0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
+	0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53,
+	0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08,
+	0x01, 0x10, 0x02, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+	0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+	0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+	0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+	0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c,
+	0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc
+)
+
+func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() {
+		file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData)
+	})
+	return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData
+}
+
+var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{
+	(Span_SpanKind)(0),               // 0: opentelemetry.proto.trace.v1.Span.SpanKind
+	(Status_StatusCode)(0),           // 1: opentelemetry.proto.trace.v1.Status.StatusCode
+	(*TracesData)(nil),               // 2: opentelemetry.proto.trace.v1.TracesData
+	(*ResourceSpans)(nil),            // 3: opentelemetry.proto.trace.v1.ResourceSpans
+	(*ScopeSpans)(nil),               // 4: opentelemetry.proto.trace.v1.ScopeSpans
+	(*Span)(nil),                     // 5: opentelemetry.proto.trace.v1.Span
+	(*Status)(nil),                   // 6: opentelemetry.proto.trace.v1.Status
+	(*Span_Event)(nil),               // 7: opentelemetry.proto.trace.v1.Span.Event
+	(*Span_Link)(nil),                // 8: opentelemetry.proto.trace.v1.Span.Link
+	(*v1.Resource)(nil),              // 9: opentelemetry.proto.resource.v1.Resource
+	(*v11.InstrumentationScope)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationScope
+	(*v11.KeyValue)(nil),             // 11: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{
+	3,  // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+	9,  // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+	4,  // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
+	10, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+	5,  // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
+	0,  // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
+	11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	7,  // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
+	8,  // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
+	6,  // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
+	1,  // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
+	11, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	11, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+	13, // [13:13] is the sub-list for method output_type
+	13, // [13:13] is the sub-list for method input_type
+	13, // [13:13] is the sub-list for extension type_name
+	13, // [13:13] is the sub-list for extension extendee
+	0,  // [0:13] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() }
+func file_opentelemetry_proto_trace_v1_trace_proto_init() {
+	if File_opentelemetry_proto_trace_v1_trace_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TracesData); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceSpans); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ScopeSpans); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Span); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Span_Event); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Span_Link); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc,
+			NumEnums:      2,
+			NumMessages:   7,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_opentelemetry_proto_trace_v1_trace_proto_goTypes,
+		DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs,
+		EnumInfos:         file_opentelemetry_proto_trace_v1_trace_proto_enumTypes,
+		MessageInfos:      file_opentelemetry_proto_trace_v1_trace_proto_msgTypes,
+	}.Build()
+	File_opentelemetry_proto_trace_v1_trace_proto = out.File
+	file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil
+	file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil
+	file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil
+}
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 000000000..dc5225b6d
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+	"fmt"
+	"log"
+	"time"
+)
+
+const (
+	timeSeriesNumBuckets       = 64
+	minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+	1 * time.Second,
+	10 * time.Second,
+	1 * time.Minute,
+	10 * time.Minute,
+	1 * time.Hour,
+	6 * time.Hour,
+	24 * time.Hour,          // 1 day
+	7 * 24 * time.Hour,      // 1 week
+	4 * 7 * 24 * time.Hour,  // 4 weeks
+	16 * 7 * 24 * time.Hour, // 16 weeks
+}
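+
+// With timeSeriesNumBuckets = 64, each level retains 64 buckets at its
+// resolution: the 1-second level covers roughly the last 64 seconds, while
+// the 16-week level covers 64 * 16 = 1024 weeks, i.e. close to 20 years.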
+
+var minuteHourSeriesResolutions = []time.Duration{
+	1 * time.Second,
+	1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+	Multiply(ratio float64)    // Multiplies the data in self by a given ratio
+	Add(other Observable)      // Adds the data from a different observation to self
+	Clear()                    // Clears the observation so it can be reused.
+	CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
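+
+// Any aggregatable statistic can satisfy Observable. A minimal sketch of an
+// integer counter (not part of this package) could look like:
+//
+//	type Count int64
+//
+//	func (c *Count) Multiply(ratio float64)    { *c = Count(float64(*c) * ratio) }
+//	func (c *Count) Add(other Observable)      { *c += *other.(*Count) }
+//	func (c *Count) Clear()                    { *c = 0 }
+//	func (c *Count) CopyFrom(other Observable) { *c = *other.(*Count) }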
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+	f := Float(0)
+	return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+	o := other.(*Float)
+	*f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+	o := other.(*Float)
+	*f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+	Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
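+
+// Tests can substitute a deterministic clock; a minimal sketch:
+//
+//	type fakeClock struct{ t time.Time }
+//
+//	func (c *fakeClock) Time() time.Time { return c.t }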
+
+// tsLevel holds the information kept per level. Each level consists of a
+// circular list of observations. The start of the level can be derived from
+// end minus len(buckets) * size.
+type tsLevel struct {
+	oldest   int               // index to oldest bucketed Observable
+	newest   int               // index to newest bucketed Observable
+	end      time.Time         // end timestamp for this level
+	size     time.Duration     // duration of the bucketed Observable
+	buckets  []Observable      // collections of observations
+	provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+	l.oldest = 0
+	l.newest = len(l.buckets) - 1
+	l.end = time.Time{}
+	for i := range l.buckets {
+		if l.buckets[i] != nil {
+			l.buckets[i].Clear()
+			l.buckets[i] = nil
+		}
+	}
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+	l.size = size
+	l.provider = f
+	l.buckets = make([]Observable, numBuckets)
+}
+
+// timeSeries keeps a sequence of levels. Each level is responsible for
+// storing data at a given resolution. For example, the first level stores
+// data at a one minute resolution while the second level stores data at a
+// one hour resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+	provider    func() Observable // make more Observable
+	numBuckets  int               // number of buckets in each level
+	levels      []*tsLevel        // levels of bucketed Observable
+	lastAdd     time.Time         // time of last Observable tracked
+	total       Observable        // convenient aggregation of all Observable
+	clock       Clock             // Clock for getting current time
+	pending     Observable        // observations not yet bucketed
+	pendingTime time.Time         // what time are we keeping in pending
+	dirty       bool              // if there are pending observations
+}
+
+// init initializes the time series, creating one level per supplied resolution.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+	ts.provider = f
+	ts.numBuckets = numBuckets
+	ts.clock = clock
+	ts.levels = make([]*tsLevel, len(resolutions))
+
+	for i := range resolutions {
+		if i > 0 && resolutions[i-1] >= resolutions[i] {
+			log.Print("timeseries: resolutions must be monotonically increasing")
+			break
+		}
+		newLevel := new(tsLevel)
+		newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+		ts.levels[i] = newLevel
+	}
+
+	ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+	ts.lastAdd = time.Time{}
+	ts.total = ts.resetObservation(ts.total)
+	ts.pending = ts.resetObservation(ts.pending)
+	ts.pendingTime = time.Time{}
+	ts.dirty = false
+
+	for i := range ts.levels {
+		ts.levels[i].Clear()
+	}
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+	ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+	smallBucketDuration := ts.levels[0].size
+
+	if t.After(ts.lastAdd) {
+		ts.lastAdd = t
+	}
+
+	if t.After(ts.pendingTime) {
+		ts.advance(t)
+		ts.mergePendingUpdates()
+		ts.pendingTime = ts.levels[0].end
+		ts.pending.CopyFrom(observation)
+		ts.dirty = true
+	} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+		// The observation is close enough to go into the pending bucket.
+		// This compensates for clock skewing and small scheduling delays
+		// by letting the update stay in the fast path.
+		ts.pending.Add(observation)
+		ts.dirty = true
+	} else {
+		ts.mergeValue(observation, t)
+	}
+}
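+
+// AddWithTime thus takes one of three paths (sketch below assumes level 0
+// has a one-second resolution): a strictly newer observation advances the
+// levels and becomes the new pending value; one landing within the smallest
+// bucket of pendingTime is folded into pending on the fast path; anything
+// older is merged directly into the historical buckets of every level:
+//
+//	ts.AddWithTime(obs, now)                   // newer: advance, then pending.CopyFrom
+//	ts.AddWithTime(obs, now)                   // fast path: pending.Add
+//	ts.AddWithTime(obs, now.Add(-time.Minute)) // older: mergeValue into all levels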
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+	for _, level := range ts.levels {
+		index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+		if 0 <= index && index < ts.numBuckets {
+			bucketNumber := (level.oldest + index) % ts.numBuckets
+			if level.buckets[bucketNumber] == nil {
+				level.buckets[bucketNumber] = level.provider()
+			}
+			level.buckets[bucketNumber].Add(observation)
+		}
+	}
+	ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+	if ts.dirty {
+		ts.mergeValue(ts.pending, ts.pendingTime)
+		ts.pending = ts.resetObservation(ts.pending)
+		ts.dirty = false
+	}
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+	if !t.After(ts.levels[0].end) {
+		return
+	}
+	for i := 0; i < len(ts.levels); i++ {
+		level := ts.levels[i]
+		if !level.end.Before(t) {
+			break
+		}
+
+		// If the time is sufficiently far, just clear the level and advance
+		// directly.
+		if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+			for _, b := range level.buckets {
+				ts.resetObservation(b)
+			}
+			level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+		}
+
+		for t.After(level.end) {
+			level.end = level.end.Add(level.size)
+			level.newest = level.oldest
+			level.oldest = (level.oldest + 1) % ts.numBuckets
+			ts.resetObservation(level.buckets[level.newest])
+		}
+
+		t = level.end
+	}
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+	now := ts.clock.Time()
+	if ts.levels[0].end.Before(now) {
+		ts.advance(now)
+	}
+
+	ts.mergePendingUpdates()
+
+	result := ts.provider()
+	l := ts.levels[level]
+	index := l.newest
+
+	for i := 0; i < num; i++ {
+		if l.buckets[index] != nil {
+			result.Add(l.buckets[index])
+		}
+		if index == 0 {
+			index = ts.numBuckets
+		}
+		index--
+	}
+
+	return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+	if level < 0 || level >= len(ts.levels) {
+		log.Print("timeseries: bad level argument: ", level)
+		return nil
+	}
+	if num < 0 || num >= ts.numBuckets {
+		log.Print("timeseries: bad num argument: ", num)
+		return nil
+	}
+
+	results := make([]Observable, num)
+	now := ts.clock.Time()
+	if ts.levels[0].end.Before(now) {
+		ts.advance(now)
+	}
+
+	ts.mergePendingUpdates()
+
+	l := ts.levels[level]
+	index := l.newest
+
+	for i := 0; i < num; i++ {
+		result := ts.provider()
+		results[i] = result
+		if l.buckets[index] != nil {
+			result.CopyFrom(l.buckets[index])
+		}
+
+		if index == 0 {
+			index = ts.numBuckets
+		}
+		index--
+	}
+	return results
+}
+
+// ScaleBy scales all observations, including the pending and total values, by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+	for _, l := range ts.levels {
+		for i := 0; i < ts.numBuckets; i++ {
+			l.buckets[i].Multiply(factor)
+		}
+	}
+
+	ts.total.Multiply(factor)
+	ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If the start or finish times don't fall on bucket boundaries of the same
+// level, the returned value is approximate.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+	return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+	now := ts.clock.Time()
+	return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+	ts.mergePendingUpdates()
+	return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+	if start.After(finish) {
+		log.Printf("timeseries: start > finish, %v>%v", start, finish)
+		return nil
+	}
+
+	if num < 0 {
+		log.Printf("timeseries: num < 0, %v", num)
+		return nil
+	}
+
+	results := make([]Observable, num)
+
+	for _, l := range ts.levels {
+		if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+			ts.extract(l, start, finish, num, results)
+			return results
+		}
+	}
+
+	// Failed to find a level that covers the desired range. So just
+	// extract from the last level, even if it doesn't cover the entire
+	// desired range.
+	ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+	return results
+}
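+
+// For example, ComputeRange(now.Add(-time.Minute), now, 6) returns six
+// Observables, each approximating a 10-second slice of the last minute.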
+
+// RecentList returns num values computed over the most recent interval of
+// length delta.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+	if delta < 0 {
+		return nil
+	}
+	now := ts.clock.Time()
+	return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract fills results with num observations taken from the given level
+// over the given time range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+	ts.mergePendingUpdates()
+
+	srcInterval := l.size
+	dstInterval := finish.Sub(start) / time.Duration(num)
+	dstStart := start
+	srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+	srcIndex := 0
+
+	// Where should scanning start?
+	if dstStart.After(srcStart) {
+		advance := int(dstStart.Sub(srcStart) / srcInterval)
+		srcIndex += advance
+		srcStart = srcStart.Add(time.Duration(advance) * srcInterval)
+	}
+
+	// The i'th value is computed as shown below.
+	// interval = (finish-start)/num
+	// i'th value = sum of observations in range
+	//   [ start + i       * interval,
+	//     start + (i + 1) * interval )
+	for i := 0; i < num; i++ {
+		results[i] = ts.resetObservation(results[i])
+		dstEnd := dstStart.Add(dstInterval)
+		for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+			srcEnd := srcStart.Add(srcInterval)
+			if srcEnd.After(ts.lastAdd) {
+				srcEnd = ts.lastAdd
+			}
+
+			if !srcEnd.Before(dstStart) {
+				srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+				if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+					// dst completely contains src.
+					if srcValue != nil {
+						results[i].Add(srcValue)
+					}
+				} else {
+					// dst partially overlaps src.
+					overlapStart := maxTime(srcStart, dstStart)
+					overlapEnd := minTime(srcEnd, dstEnd)
+					base := srcEnd.Sub(srcStart)
+					fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+					used := ts.provider()
+					if srcValue != nil {
+						used.CopyFrom(srcValue)
+					}
+					used.Multiply(fraction)
+					results[i].Add(used)
+				}
+
+				if srcEnd.After(dstEnd) {
+					break
+				}
+			}
+			srcIndex++
+			srcStart = srcStart.Add(srcInterval)
+		}
+		dstStart = dstStart.Add(dstInterval)
+	}
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+	if observation == nil {
+		observation = ts.provider()
+	} else {
+		observation.Clear()
+	}
+	return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+	timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the provided function for creating new Observables.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+	return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the provided function
+// for creating new Observables and the clock for assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+	ts := new(TimeSeries)
+	ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+	return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+	timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the provided function for creating new Observables.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+	return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the
+// provided function for creating new Observables and the clock for assigning
+// timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+	ts := new(MinuteHourSeries)
+	ts.timeSeries.init(minuteHourSeriesResolutions, f,
+		minuteHourSeriesNumBuckets, clock)
+	return ts
+}
+
+// Minute returns the sum of observations over the last minute.
+func (ts *MinuteHourSeries) Minute() Observable {
+	return ts.timeSeries.Latest(0, 60)
+}
+
+// Hour returns the sum of observations over the last hour.
+func (ts *MinuteHourSeries) Hour() Observable {
+	return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+	if a.Before(b) {
+		return a
+	}
+	return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+	if a.After(b) {
+		return a
+	}
+	return b
+}
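+
+// A minimal usage sketch, assuming the Float observable defined earlier in
+// this package:
+//
+//	series := NewMinuteHourSeries(NewFloat)
+//	f := Float(1)
+//	series.Add(&f)
+//	perMinute := series.Minute() // sum of observations over the last minute
+//	perHour := series.Hour()     // sum of observations over the last hour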
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 000000000..c646a6952
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,532 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"io"
+	"log"
+	"net/http"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"text/tabwriter"
+	"time"
+)
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+	MaxErrAge time.Duration
+	String    string
+}
+
+var buckets = []bucket{
+	{0, "total"},
+	{10 * time.Second, "errs<10s"},
+	{1 * time.Minute, "errs<1m"},
+	{10 * time.Minute, "errs<10m"},
+	{1 * time.Hour, "errs<1h"},
+	{10 * time.Hour, "errs<10h"},
+	{24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking. The request may be nil.
+//
+// Most users will use the Events handler.
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+	now := time.Now()
+	data := &struct {
+		Families []string // family names
+		Buckets  []bucket
+		Counts   [][]int // eventLog count per family/bucket
+
+		// Set when a bucket has been selected.
+		Family    string
+		Bucket    int
+		EventLogs eventLogs
+		Expanded  bool
+	}{
+		Buckets: buckets,
+	}
+
+	data.Families = make([]string, 0, len(families))
+	famMu.RLock()
+	for name := range families {
+		data.Families = append(data.Families, name)
+	}
+	famMu.RUnlock()
+	sort.Strings(data.Families)
+
+	// Count the number of eventLogs in each family for each error age.
+	data.Counts = make([][]int, len(data.Families))
+	for i, name := range data.Families {
+		// TODO(sameer): move this loop under the family lock.
+		f := getEventFamily(name)
+		data.Counts[i] = make([]int, len(data.Buckets))
+		for j, b := range data.Buckets {
+			data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+		}
+	}
+
+	if req != nil {
+		var ok bool
+		data.Family, data.Bucket, ok = parseEventsArgs(req)
+		if ok {
+			data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+		}
+		if data.EventLogs != nil {
+			defer data.EventLogs.Free()
+			sort.Sort(data.EventLogs)
+		}
+		if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+			data.Expanded = exp
+		}
+	}
+
+	famMu.RLock()
+	defer famMu.RUnlock()
+	if err := eventsTmpl().Execute(w, data); err != nil {
+		log.Printf("net/trace: Failed executing template: %v", err)
+	}
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+	fam, bStr := req.FormValue("fam"), req.FormValue("b")
+	if fam == "" || bStr == "" {
+		return "", 0, false
+	}
+	b, err := strconv.Atoi(bStr)
+	if err != nil || b < 0 || b >= len(buckets) {
+		return "", 0, false
+	}
+	return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+	// Printf formats its arguments with fmt.Sprintf and adds the
+	// result to the event log.
+	Printf(format string, a ...interface{})
+
+	// Errorf is like Printf, but it marks this event as an error.
+	Errorf(format string, a ...interface{})
+
+	// Finish declares that this event log is complete.
+	// The event log should not be used after calling this method.
+	Finish()
+}
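+
+// A minimal usage sketch for a long-lived object (the family and title
+// strings are arbitrary):
+//
+//	el := NewEventLog("mypkg.Conn", "example.com:80")
+//	el.Printf("connected")
+//	el.Errorf("read failed: %v", io.EOF)
+//	el.Finish() // el must not be used after this call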
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+	el := newEventLog()
+	el.ref()
+	el.Family, el.Title = family, title
+	el.Start = time.Now()
+	el.events = make([]logEntry, 0, maxEventsPerLog)
+	el.stack = make([]uintptr, 32)
+	n := runtime.Callers(2, el.stack)
+	el.stack = el.stack[:n]
+
+	getEventFamily(family).add(el)
+	return el
+}
+
+func (el *eventLog) Finish() {
+	getEventFamily(el.Family).remove(el)
+	el.unref() // matches ref in New
+}
+
+var (
+	famMu    sync.RWMutex
+	families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+	famMu.Lock()
+	defer famMu.Unlock()
+	f := families[fam]
+	if f == nil {
+		f = &eventFamily{}
+		families[fam] = f
+	}
+	return f
+}
+
+type eventFamily struct {
+	mu        sync.RWMutex
+	eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+	f.mu.Lock()
+	f.eventLogs = append(f.eventLogs, el)
+	f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	for i, el0 := range f.eventLogs {
+		if el == el0 {
+			copy(f.eventLogs[i:], f.eventLogs[i+1:])
+			f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+			return
+		}
+	}
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	for _, el := range f.eventLogs {
+		if el.hasRecentError(now, maxErrAge) {
+			n++
+		}
+	}
+	return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	els = make(eventLogs, 0, len(f.eventLogs))
+	for _, el := range f.eventLogs {
+		if el.hasRecentError(now, maxErrAge) {
+			el.ref()
+			els = append(els, el)
+		}
+	}
+	return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+	for _, el := range els {
+		el.unref()
+	}
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int           { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int)      { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+	When    time.Time
+	Elapsed time.Duration // since previous event in log
+	NewDay  bool          // whether this event is on a different day to the previous event
+	What    string
+	IsErr   bool
+}
+
+// WhenString returns a string representation of the elapsed time of the event.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+	if e.NewDay {
+		return e.When.Format("2006/01/02 15:04:05.000000")
+	}
+	return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+	// Family is the top-level grouping of event logs to which this belongs.
+	Family string
+
+	// Title is the title of this event log.
+	Title string
+
+	// Timing information.
+	Start time.Time
+
+	// Call stack where this event log was created.
+	stack []uintptr
+
+	// Append-only sequence of events.
+	//
+	// TODO(sameer): change this to a ring buffer to avoid the array copy
+	// when we hit maxEventsPerLog.
+	mu            sync.RWMutex
+	events        []logEntry
+	LastErrorTime time.Time
+	discarded     int
+
+	refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+	// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+	el.Family = ""
+	el.Title = ""
+	el.Start = time.Time{}
+	el.stack = nil
+	el.events = nil
+	el.LastErrorTime = time.Time{}
+	el.discarded = 0
+	el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+	if maxErrAge == 0 {
+		return true
+	}
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+	if len(el.events) == 0 {
+		return t.Sub(el.Start), false
+	}
+	prev := el.events[len(el.events)-1].When
+	return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+	el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+	el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+	e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+	el.mu.Lock()
+	e.Elapsed, e.NewDay = el.delta(e.When)
+	if len(el.events) < maxEventsPerLog {
+		el.events = append(el.events, e)
+	} else {
+		// Discard the oldest event.
+		if el.discarded == 0 {
+			// el.discarded starts at two to account for the event
+			// it is replacing, plus the next one that we are about
+			// to drop.
+			el.discarded = 2
+		} else {
+			el.discarded++
+		}
+		// TODO(sameer): if this causes allocations on a critical path,
+		// change eventLog.What to be a fmt.Stringer, as in trace.go.
+		el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+		// The timestamp of the discarded meta-event should be
+		// the time of the last event it is representing.
+		el.events[0].When = el.events[1].When
+		copy(el.events[1:], el.events[2:])
+		el.events[maxEventsPerLog-1] = e
+	}
+	if e.IsErr {
+		el.LastErrorTime = e.When
+	}
+	el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+	atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+	if atomic.AddInt32(&el.refs, -1) == 0 {
+		freeEventLog(el)
+	}
+}
+
+func (el *eventLog) When() string {
+	return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+	elapsed := time.Since(el.Start)
+	return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+	buf := new(bytes.Buffer)
+	tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+	printStackRecord(tw, el.stack)
+	tw.Flush()
+	return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+	for _, pc := range stk {
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			continue
+		}
+		file, line := f.FileLine(pc)
+		name := f.Name()
+		// Hide runtime.goexit and any runtime functions at the beginning.
+		if strings.HasPrefix(name, "runtime.") {
+			continue
+		}
+		fmt.Fprintf(w, "#   %s\t%s:%d\n", name, file, line)
+	}
+}
+
+func (el *eventLog) Events() []logEntry {
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+	select {
+	case el := <-freeEventLogs:
+		return el
+	default:
+		return new(eventLog)
+	}
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+	eventsTmplOnce.Do(func() {
+		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+			"elapsed":   elapsed,
+			"trimSpace": strings.TrimSpace,
+		}).Parse(eventsHTML))
+	})
+	return eventsTmplCache
+}
+
+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#req-status td.family {
+			padding-right: 2em;
+		}
+		table#req-status td.active {
+			padding-right: 1em;
+		}
+		table#req-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+		address {
+			font-size: smaller;
+			margin-top: 5em;
+		}
+	</style>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+	        {{range $j, $bucket := $.Buckets}}
+	        {{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+	                {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+		        [{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+                {{end}}
+
+	</tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}}
+	</body>
+</html>
+`
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 000000000..d6c71101e
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"log"
+	"math"
+	"sync"
+
+	"golang.org/x/net/internal/timeseries"
+)
+
+const (
+	bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
+type histogram struct {
+	sum          int64   // running total of measurements
+	sumOfSquares float64 // running total of squared measurements
+	buckets      []int64 // bucketed values for histogram
+	value        int     // holds a single value as an optimization
+	valueCount   int64   // number of values recorded for single value
+}
+
+// addMeasurement records a single measurement in the histogram.
+func (h *histogram) addMeasurement(value int64) {
+	// TODO: assert invariant
+	h.sum += value
+	h.sumOfSquares += float64(value) * float64(value)
+
+	bucketIndex := getBucket(value)
+
+	if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+		h.value = bucketIndex
+		h.valueCount++
+	} else {
+		h.allocateBuckets()
+		h.buckets[bucketIndex]++
+	}
+}
+
+func (h *histogram) allocateBuckets() {
+	if h.buckets == nil {
+		h.buckets = make([]int64, bucketCount)
+		h.buckets[h.value] = h.valueCount
+		h.value = 0
+		h.valueCount = -1
+	}
+}
+
+func log2(i int64) int {
+	n := 0
+	for ; i >= 0x100; i >>= 8 {
+		n += 8
+	}
+	for ; i > 0; i >>= 1 {
+		n++
+	}
+	return n
+}
+
+func getBucket(i int64) (index int) {
+	index = log2(i) - 1
+	if index < 0 {
+		index = 0
+	}
+	if index >= bucketCount {
+		index = bucketCount - 1
+	}
+	return
+}
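+
+// For example, log2 returns the bit length of i, so getBucket maps 0 and 1
+// to bucket 0, 2-3 to bucket 1, 4-7 to bucket 2, and so on; values of
+// 1<<37 and above are clamped into the last bucket, bucketCount-1.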
+
+// total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+	if h.valueCount >= 0 {
+		total = h.valueCount
+	}
+	for _, val := range h.buckets {
+		total += val
+	}
+	return
+}
+
+// average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+	t := h.total()
+	if t == 0 {
+		return 0
+	}
+	return float64(h.sum) / float64(t)
+}
+
+// variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+	t := float64(h.total())
+	if t == 0 {
+		return 0
+	}
+	s := float64(h.sum) / t
+	return h.sumOfSquares/t - s*s
+}
+
+// standardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+	return math.Sqrt(h.variance())
+}
+
+// percentileBoundary estimates the value below which the given fraction of
+// recorded observations fall.
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+	total := h.total()
+
+	// Corner cases (make sure result is strictly less than Total())
+	if total == 0 {
+		return 0
+	} else if total == 1 {
+		return int64(h.average())
+	}
+
+	percentOfTotal := round(float64(total) * percentile)
+	var runningTotal int64
+
+	for i := range h.buckets {
+		value := h.buckets[i]
+		runningTotal += value
+		if runningTotal == percentOfTotal {
+			// We hit an exact bucket boundary. If the next bucket has data, it is a
+			// good estimate of the value. If the bucket is empty, we interpolate the
+			// midpoint between the next bucket's boundary and the next non-zero
+			// bucket. If the remaining buckets are all empty, then we use the
+			// boundary for the next bucket as the estimate.
+			j := uint8(i + 1)
+			min := bucketBoundary(j)
+			if runningTotal < total {
+				for h.buckets[j] == 0 {
+					j++
+				}
+			}
+			max := bucketBoundary(j)
+			return min + round(float64(max-min)/2)
+		} else if runningTotal > percentOfTotal {
+			// The value is in this bucket. Interpolate the value.
+			delta := runningTotal - percentOfTotal
+			percentBucket := float64(value-delta) / float64(value)
+			bucketMin := bucketBoundary(uint8(i))
+			nextBucketMin := bucketBoundary(uint8(i + 1))
+			bucketSize := nextBucketMin - bucketMin
+			return bucketMin + round(percentBucket*float64(bucketSize))
+		}
+	}
+	return bucketBoundary(bucketCount - 1)
+}
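+
+// Worked example: with five observations in bucket 0 and five in bucket 1,
+// percentileBoundary(0.5) gives percentOfTotal = 5; the running total hits
+// 5 exactly at the end of bucket 0 and bucket 1 is non-empty, so min = max
+// = bucketBoundary(1) = 2 and the estimate returned is 2.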
+
+// median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+	return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+	o := other.(*histogram)
+	if o.valueCount == 0 {
+		// Other histogram is empty
+	} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+		// Both have a single bucketed value, aggregate them
+		h.valueCount += o.valueCount
+	} else {
+		// Two different values necessitate buckets in this histogram
+		h.allocateBuckets()
+		if o.valueCount >= 0 {
+			h.buckets[o.value] += o.valueCount
+		} else {
+			for i := range h.buckets {
+				h.buckets[i] += o.buckets[i]
+			}
+		}
+	}
+	h.sumOfSquares += o.sumOfSquares
+	h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+	h.buckets = nil
+	h.value = 0
+	h.valueCount = 0
+	h.sum = 0
+	h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+	o := other.(*histogram)
+	if o.valueCount == -1 {
+		h.allocateBuckets()
+		copy(h.buckets, o.buckets)
+	}
+	h.sum = o.sum
+	h.sumOfSquares = o.sumOfSquares
+	h.value = o.value
+	h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+	if h.valueCount == -1 {
+		for i := range h.buckets {
+			h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+		}
+	} else {
+		h.valueCount = int64(float64(h.valueCount) * ratio)
+	}
+	h.sum = int64(float64(h.sum) * ratio)
+	h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+	r := new(histogram)
+	r.Clear()
+	return r
+}
+
+func (h *histogram) String() string {
+	return fmt.Sprintf("%d, %f, %d, %d, %v",
+		h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument
+func round(in float64) int64 {
+	return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+	if bucket == 0 {
+		return 0
+	}
+	return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+	Lower, Upper       int64
+	N                  int64
+	Pct, CumulativePct float64
+	GraphWidth         int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+	Buckets                 []*bucketData
+	Count, Median           int64
+	Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+	// Force the allocation of buckets to simplify the rendering implementation
+	h.allocateBuckets()
+	// We scale the bars on the right so that the largest bar is
+	// maxHTMLBarWidth pixels in width.
+	maxBucket := int64(0)
+	for _, n := range h.buckets {
+		if n > maxBucket {
+			maxBucket = n
+		}
+	}
+	total := h.total()
+	barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+	var pctMult float64
+	if total == 0 {
+		pctMult = 1.0
+	} else {
+		pctMult = 100.0 / float64(total)
+	}
+
+	buckets := make([]*bucketData, len(h.buckets))
+	runningTotal := int64(0)
+	for i, n := range h.buckets {
+		if n == 0 {
+			continue
+		}
+		runningTotal += n
+		var upperBound int64
+		if i < bucketCount-1 {
+			upperBound = bucketBoundary(uint8(i + 1))
+		} else {
+			upperBound = math.MaxInt64
+		}
+		buckets[i] = &bucketData{
+			Lower:         bucketBoundary(uint8(i)),
+			Upper:         upperBound,
+			N:             n,
+			Pct:           float64(n) * pctMult,
+			CumulativePct: float64(runningTotal) * pctMult,
+			GraphWidth:    int(float64(n) * barsizeMult),
+		}
+	}
+	return &data{
+		Buckets:           buckets,
+		Count:             total,
+		Median:            h.median(),
+		Mean:              h.average(),
+		StandardDeviation: h.standardDeviation(),
+	}
+}
+
+func (h *histogram) html() template.HTML {
+	buf := new(bytes.Buffer)
+	if err := distTmpl().Execute(buf, h.newData()); err != nil {
+		buf.Reset()
+		log.Printf("net/trace: couldn't execute template: %v", err)
+	}
+	return template.HTML(buf.String())
+}
+
+var distTmplCache *template.Template
+var distTmplOnce sync.Once
+
+func distTmpl() *template.Template {
+	distTmplOnce.Do(func() {
+		// Input: data
+		distTmplCache = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+    <td style="padding:0.25em">Count: {{.Count}}</td>
+    <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+    <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+    <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`))
+	})
+	return distTmplCache
+}
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 000000000..eae2a99f5
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1130 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+	func fooHandler(w http.ResponseWriter, req *http.Request) {
+		tr := trace.New("mypkg.Foo", req.URL.Path)
+		defer tr.Finish()
+		...
+		tr.LazyPrintf("some event %q happened", str)
+		...
+		if err := somethingImportant(); err != nil {
+			tr.LazyPrintf("somethingImportant failed: %v", err)
+			tr.SetError()
+		}
+	}
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration.  It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+	// A Fetcher fetches URL paths for a single domain.
+	type Fetcher struct {
+		domain string
+		events trace.EventLog
+	}
+
+	func NewFetcher(domain string) *Fetcher {
+		return &Fetcher{
+			domain,
+			trace.NewEventLog("mypkg.Fetcher", domain),
+		}
+	}
+
+	func (f *Fetcher) Fetch(path string) (string, error) {
+		resp, err := http.Get("http://" + f.domain + "/" + path)
+		if err != nil {
+			f.events.Errorf("Get(%q) = %v", path, err)
+			return "", err
+		}
+		f.events.Printf("Get(%q) = %s", path, resp.Status)
+		...
+	}
+
+	func (f *Fetcher) Close() error {
+		f.events.Finish()
+		return nil
+	}
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error.  The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"html/template"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"runtime"
+	"sort"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// HTTP ServeMux paths.
+const (
+	debugRequestsPath = "/debug/requests"
+	debugEventsPath   = "/debug/events"
+)
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customize its authorization requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+	// RemoteAddr is commonly in the form "IP" or "IP:port".
+	// If it is in the form "IP:port", split off the port.
+	host, _, err := net.SplitHostPort(req.RemoteAddr)
+	if err != nil {
+		host = req.RemoteAddr
+	}
+	switch host {
+	case "localhost", "127.0.0.1", "::1":
+		return true, true
+	default:
+		return false, false
+	}
+}
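+
+// A program that wants to widen access can replace AuthRequest; this sketch
+// additionally trusts one internal address (the address is illustrative):
+//
+//	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+//		host, _, err := net.SplitHostPort(req.RemoteAddr)
+//		if err != nil {
+//			host = req.RemoteAddr
+//		}
+//		return host == "127.0.0.1" || host == "10.0.0.1", false
+//	}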
+
+func init() {
+	_, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}})
+	if pat == debugRequestsPath {
+		panic("/debug/requests is already registered. You may have two independent copies of " +
+			"golang.org/x/net/trace in your binary, trying to maintain separate state. This may " +
+			"involve a vendored copy of golang.org/x/net/trace.")
+	}
+
+	// TODO(jbd): Serve Traces from /debug/traces in the future?
+	// There is no requirement for a request to be present to have traces.
+	http.HandleFunc(debugRequestsPath, Traces)
+	http.HandleFunc(debugEventsPath, Events)
+}
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
+func NewContext(ctx context.Context, tr Trace) context.Context {
+	return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+	tr, ok = ctx.Value(contextKey).(Trace)
+	return
+}
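+
+// A propagation sketch: attach a Trace to a request context and recover it
+// deeper in the call chain (the handler wiring is illustrative):
+//
+//	tr := trace.New("mypkg.Foo", req.URL.Path)
+//	defer tr.Finish()
+//	ctx := trace.NewContext(req.Context(), tr)
+//	// ... later, further down the stack:
+//	if tr, ok := trace.FromContext(ctx); ok {
+//		tr.LazyPrintf("reached the storage layer")
+//	}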
+
+// Traces responds with traces from the program.
+// The package initialization registers it in http.DefaultServeMux
+// at /debug/requests.
+//
+// It performs authorization by running AuthRequest.
+func Traces(w http.ResponseWriter, req *http.Request) {
+	any, sensitive := AuthRequest(req)
+	if !any {
+		http.Error(w, "not allowed", http.StatusUnauthorized)
+		return
+	}
+	w.Header().Set("Content-Type", "text/html; charset=utf-8")
+	Render(w, req, sensitive)
+}
+
+// Events responds with a page of events collected by EventLogs.
+// The package initialization registers it in http.DefaultServeMux
+// at /debug/events.
+//
+// It performs authorization by running AuthRequest.
+func Events(w http.ResponseWriter, req *http.Request) {
+	any, sensitive := AuthRequest(req)
+	if !any {
+		http.Error(w, "not allowed", http.StatusUnauthorized)
+		return
+	}
+	w.Header().Set("Content-Type", "text/html; charset=utf-8")
+	RenderEvents(w, req, sensitive)
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking. The request may be nil.
+//
+// Most users will use the Traces handler.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+	data := &struct {
+		Families         []string
+		ActiveTraceCount map[string]int
+		CompletedTraces  map[string]*family
+
+		// Set when a bucket has been selected.
+		Traces        traceList
+		Family        string
+		Bucket        int
+		Expanded      bool
+		Traced        bool
+		Active        bool
+		ShowSensitive bool // whether to show sensitive events
+
+		Histogram       template.HTML
+		HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+		// If non-zero, the set of traces is a partial set,
+		// and this is the total number.
+		Total int
+	}{
+		CompletedTraces: completedTraces,
+	}
+
+	data.ShowSensitive = sensitive
+	if req != nil {
+		// Allow show_sensitive=0 to force hiding of sensitive data for testing.
+		// This only goes one way; you can't use show_sensitive=1 to see things.
+		if req.FormValue("show_sensitive") == "0" {
+			data.ShowSensitive = false
+		}
+
+		if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+			data.Expanded = exp
+		}
+		if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+			data.Traced = exp
+		}
+	}
+
+	completedMu.RLock()
+	data.Families = make([]string, 0, len(completedTraces))
+	for fam := range completedTraces {
+		data.Families = append(data.Families, fam)
+	}
+	completedMu.RUnlock()
+	sort.Strings(data.Families)
+
+	// We are careful here to minimize the time spent locking activeMu,
+	// since that lock is required every time an RPC starts and finishes.
+	data.ActiveTraceCount = make(map[string]int, len(data.Families))
+	activeMu.RLock()
+	for fam, s := range activeTraces {
+		data.ActiveTraceCount[fam] = s.Len()
+	}
+	activeMu.RUnlock()
+
+	var ok bool
+	data.Family, data.Bucket, ok = parseArgs(req)
+	switch {
+	case !ok:
+		// No-op
+	case data.Bucket == -1:
+		data.Active = true
+		n := data.ActiveTraceCount[data.Family]
+		data.Traces = getActiveTraces(data.Family)
+		if len(data.Traces) < n {
+			data.Total = n
+		}
+	case data.Bucket < bucketsPerFamily:
+		if b := lookupBucket(data.Family, data.Bucket); b != nil {
+			data.Traces = b.Copy(data.Traced)
+		}
+	default:
+		if f := getFamily(data.Family, false); f != nil {
+			var obs timeseries.Observable
+			f.LatencyMu.RLock()
+			switch o := data.Bucket - bucketsPerFamily; o {
+			case 0:
+				obs = f.Latency.Minute()
+				data.HistogramWindow = "last minute"
+			case 1:
+				obs = f.Latency.Hour()
+				data.HistogramWindow = "last hour"
+			case 2:
+				obs = f.Latency.Total()
+				data.HistogramWindow = "all time"
+			}
+			f.LatencyMu.RUnlock()
+			if obs != nil {
+				data.Histogram = obs.(*histogram).html()
+			}
+		}
+	}
+
+	if data.Traces != nil {
+		defer data.Traces.Free()
+		sort.Sort(data.Traces)
+	}
+
+	completedMu.RLock()
+	defer completedMu.RUnlock()
+	if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil {
+		log.Printf("net/trace: Failed executing template: %v", err)
+	}
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+	if req == nil {
+		return "", 0, false
+	}
+	fam, bStr := req.FormValue("fam"), req.FormValue("b")
+	if fam == "" || bStr == "" {
+		return "", 0, false
+	}
+	b, err := strconv.Atoi(bStr)
+	if err != nil || b < -1 {
+		return "", 0, false
+	}
+
+	return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+	f := getFamily(fam, false)
+	if f == nil || b < 0 || b >= len(f.Buckets) {
+		return nil
+	}
+	return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// Trace represents an active request.
+type Trace interface {
+	// LazyLog adds x to the event log. It will be evaluated each time the
+	// /debug/requests page is rendered. Any memory referenced by x will be
+	// pinned until the trace is finished and later discarded.
+	LazyLog(x fmt.Stringer, sensitive bool)
+
+	// LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+	// /debug/requests page is rendered. Any memory referenced by a will be
+	// pinned until the trace is finished and later discarded.
+	LazyPrintf(format string, a ...interface{})
+
+	// SetError declares that this trace resulted in an error.
+	SetError()
+
+	// SetRecycler sets a recycler for the trace.
+	// f will be called for each event passed to LazyLog at a time when
+	// it is no longer required, whether while the trace is still active
+	// and the event is discarded, or when a completed trace is discarded.
+	SetRecycler(f func(interface{}))
+
+	// SetTraceInfo sets the trace info for the trace.
+	// This is currently unused.
+	SetTraceInfo(traceID, spanID uint64)
+
+	// SetMaxEvents sets the maximum number of events that will be stored
+	// in the trace. This has no effect if any events have already been
+	// added to the trace.
+	SetMaxEvents(m int)
+
+	// Finish declares that this trace is complete.
+	// The trace should not be used after calling this method.
+	Finish()
+}
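+
+// Beyond the package-level example, a handler may tune a trace before
+// logging to it (a sketch; the family name is illustrative):
+//
+//	tr := trace.New("mypkg.Handler", req.URL.Path)
+//	defer tr.Finish()
+//	tr.SetMaxEvents(100) // must precede the first logged event
+//	tr.LazyPrintf("handling %s", req.Method)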
+
+type lazySprintf struct {
+	format string
+	a      []interface{}
+}
+
+func (l *lazySprintf) String() string {
+	return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+	tr := newTrace()
+	tr.ref()
+	tr.Family, tr.Title = family, title
+	tr.Start = time.Now()
+	tr.maxEvents = maxEventsPerTrace
+	tr.events = tr.eventsBuf[:0]
+
+	activeMu.RLock()
+	s := activeTraces[tr.Family]
+	activeMu.RUnlock()
+	if s == nil {
+		activeMu.Lock()
+		s = activeTraces[tr.Family] // check again
+		if s == nil {
+			s = new(traceSet)
+			activeTraces[tr.Family] = s
+		}
+		activeMu.Unlock()
+	}
+	s.Add(tr)
+
+	// Trigger allocation of the completed trace structure for this family.
+	// This will cause the family to be present in the request page during
+	// the first trace of this family. We don't care about the return value,
+	// nor is there any need for this to run inline, so we execute it in its
+	// own goroutine, but only if the family isn't allocated yet.
+	completedMu.RLock()
+	if _, ok := completedTraces[tr.Family]; !ok {
+		go allocFamily(tr.Family)
+	}
+	completedMu.RUnlock()
+
+	return tr
+}
+
+func (tr *trace) Finish() {
+	elapsed := time.Since(tr.Start)
+	tr.mu.Lock()
+	tr.Elapsed = elapsed
+	tr.mu.Unlock()
+
+	if DebugUseAfterFinish {
+		buf := make([]byte, 4<<10) // 4 KB should be enough
+		n := runtime.Stack(buf, false)
+		tr.finishStack = buf[:n]
+	}
+
+	activeMu.RLock()
+	m := activeTraces[tr.Family]
+	activeMu.RUnlock()
+	m.Remove(tr)
+
+	f := getFamily(tr.Family, true)
+	tr.mu.RLock() // protects tr fields in Cond.match calls
+	for _, b := range f.Buckets {
+		if b.Cond.match(tr) {
+			b.Add(tr)
+		}
+	}
+	tr.mu.RUnlock()
+
+	// Add a sample of elapsed time as microseconds to the family's timeseries
+	h := new(histogram)
+	h.addMeasurement(elapsed.Nanoseconds() / 1e3)
+	f.LatencyMu.Lock()
+	f.Latency.Add(h)
+	f.LatencyMu.Unlock()
+
+	tr.unref() // matches ref in New
+}
+
+const (
+	bucketsPerFamily    = 9
+	tracesPerBucket     = 10
+	maxActiveTraces     = 20 // Maximum number of active traces to show.
+	maxEventsPerTrace   = 10
+	numHistogramBuckets = 38
+)
+
+var (
+	// The active traces.
+	activeMu     sync.RWMutex
+	activeTraces = make(map[string]*traceSet) // family -> traces
+
+	// Families of completed traces.
+	completedMu     sync.RWMutex
+	completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+	mu sync.RWMutex
+	m  map[*trace]bool
+
+	// We could avoid the entire map scan in FirstN by having a slice of all the traces
+	// ordered by start time, and an index into that from the trace struct, with a periodic
+	// repack of the slice after enough traces finish; we could also use a skip list or similar.
+	// However, that would shift some of the expense from /debug/requests time to RPC time,
+	// which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+	ts.mu.RLock()
+	defer ts.mu.RUnlock()
+	return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+	ts.mu.Lock()
+	if ts.m == nil {
+		ts.m = make(map[*trace]bool)
+	}
+	ts.m[tr] = true
+	ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+	ts.mu.Lock()
+	delete(ts.m, tr)
+	ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+	ts.mu.RLock()
+	defer ts.mu.RUnlock()
+
+	if n > len(ts.m) {
+		n = len(ts.m)
+	}
+	trl := make(traceList, 0, n)
+
+	// Fast path for when no selectivity is needed.
+	if n == len(ts.m) {
+		for tr := range ts.m {
+			tr.ref()
+			trl = append(trl, tr)
+		}
+		sort.Sort(trl)
+		return trl
+	}
+
+	// Pick the oldest n traces.
+	// This is inefficient. See the comment in the traceSet struct.
+	for tr := range ts.m {
+		// Put the first n traces into trl in the order they occur.
+		// When we have n, sort trl, and thereafter maintain its order.
+		if len(trl) < n {
+			tr.ref()
+			trl = append(trl, tr)
+			if len(trl) == n {
+				// This is guaranteed to happen exactly once during this loop.
+				sort.Sort(trl)
+			}
+			continue
+		}
+		if tr.Start.After(trl[n-1].Start) {
+			continue
+		}
+
+		// Find where to insert this one.
+		tr.ref()
+		i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+		trl[n-1].unref()
+		copy(trl[i+1:], trl[i:])
+		trl[i] = tr
+	}
+
+	return trl
+}
+
+func getActiveTraces(fam string) traceList {
+	activeMu.RLock()
+	s := activeTraces[fam]
+	activeMu.RUnlock()
+	if s == nil {
+		return nil
+	}
+	return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+	completedMu.RLock()
+	f := completedTraces[fam]
+	completedMu.RUnlock()
+	if f == nil && allocNew {
+		f = allocFamily(fam)
+	}
+	return f
+}
+
+func allocFamily(fam string) *family {
+	completedMu.Lock()
+	defer completedMu.Unlock()
+	f := completedTraces[fam]
+	if f == nil {
+		f = newFamily()
+		completedTraces[fam] = f
+	}
+	return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+	// traces may occur in multiple buckets.
+	Buckets [bucketsPerFamily]*traceBucket
+
+	// latency time series
+	LatencyMu sync.RWMutex
+	Latency   *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+	return &family{
+		Buckets: [bucketsPerFamily]*traceBucket{
+			{Cond: minCond(0)},
+			{Cond: minCond(50 * time.Millisecond)},
+			{Cond: minCond(100 * time.Millisecond)},
+			{Cond: minCond(200 * time.Millisecond)},
+			{Cond: minCond(500 * time.Millisecond)},
+			{Cond: minCond(1 * time.Second)},
+			{Cond: minCond(10 * time.Second)},
+			{Cond: minCond(100 * time.Second)},
+			{Cond: errorCond{}},
+		},
+		Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+	}
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+	Cond cond
+
+	// Ring buffer implementation of a fixed-size FIFO queue.
+	mu     sync.RWMutex
+	buf    [tracesPerBucket]*trace
+	start  int // < tracesPerBucket
+	length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	i := b.start + b.length
+	if i >= tracesPerBucket {
+		i -= tracesPerBucket
+	}
+	if b.length == tracesPerBucket {
+		// "Remove" an element from the bucket.
+		b.buf[i].unref()
+		b.start++
+		if b.start == tracesPerBucket {
+			b.start = 0
+		}
+	}
+	b.buf[i] = tr
+	if b.length < tracesPerBucket {
+		b.length++
+	}
+	tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	trl := make(traceList, 0, b.length)
+	for i, x := 0, b.start; i < b.length; i++ {
+		tr := b.buf[x]
+		if !tracedOnly || tr.spanID != 0 {
+			tr.ref()
+			trl = append(trl, tr)
+		}
+		x++
+		if x == b.length {
+			x = 0
+		}
+	}
+	return trl
+}
+
+func (b *traceBucket) Empty() bool {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+	match(t *trace) bool
+	String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string      { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string      { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+	for _, t := range trl {
+		t.unref()
+	}
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int           { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int)      { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+	When       time.Time
+	Elapsed    time.Duration // since previous event in trace
+	NewDay     bool          // whether this event is on a different day to the previous event
+	Recyclable bool          // whether this event was passed via LazyLog
+	Sensitive  bool          // whether this event contains sensitive information
+	What       interface{}   // string or fmt.Stringer
+}
+
+// WhenString returns a string representation of the elapsed time of the event.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+	if e.NewDay {
+		return e.When.Format("2006/01/02 15:04:05.000000")
+	}
+	return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+	return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+	// Family is the top-level grouping of traces to which this belongs.
+	Family string
+
+	// Title is the title of this trace.
+	Title string
+
+	// Start time of this trace.
+	Start time.Time
+
+	mu        sync.RWMutex
+	events    []event // Append-only sequence of events (modulo discards).
+	maxEvents int
+	recycler  func(interface{})
+	IsError   bool          // Whether this trace resulted in an error.
+	Elapsed   time.Duration // Elapsed time for this trace, zero while active.
+	traceID   uint64        // Trace information if non-zero.
+	spanID    uint64
+
+	refs int32     // how many buckets this is in
+	disc discarded // scratch space to avoid allocation
+
+	finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+
+	eventsBuf [4]event // preallocated buffer in case we only log a few events
+}
+
+func (tr *trace) reset() {
+	// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+	tr.Family = ""
+	tr.Title = ""
+	tr.Start = time.Time{}
+
+	tr.mu.Lock()
+	tr.Elapsed = 0
+	tr.traceID = 0
+	tr.spanID = 0
+	tr.IsError = false
+	tr.maxEvents = 0
+	tr.events = nil
+	tr.recycler = nil
+	tr.mu.Unlock()
+
+	tr.refs = 0
+	tr.disc = 0
+	tr.finishStack = nil
+	for i := range tr.eventsBuf {
+		tr.eventsBuf[i] = event{}
+	}
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+	if len(tr.events) == 0 {
+		return t.Sub(tr.Start), false
+	}
+	prev := tr.events[len(tr.events)-1].When
+	return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+	if DebugUseAfterFinish && tr.finishStack != nil {
+		buf := make([]byte, 4<<10) // 4 KB should be enough
+		n := runtime.Stack(buf, false)
+		log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+	}
+
+	/*
+		NOTE TO DEBUGGERS
+
+		If you are here because your program panicked in this code,
+		it is almost definitely the fault of code using this package,
+		and very unlikely to be the fault of this code.
+
+		The most likely scenario is that some code elsewhere is using
+		a trace.Trace after its Finish method is called.
+		You can temporarily set the DebugUseAfterFinish var
+		to help discover where that is; do not leave that var set,
+		since it makes this package much less efficient.
+	*/
+
+	e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+	tr.mu.Lock()
+	e.Elapsed, e.NewDay = tr.delta(e.When)
+	if len(tr.events) < tr.maxEvents {
+		tr.events = append(tr.events, e)
+	} else {
+		// Discard the middle events.
+		di := int((tr.maxEvents - 1) / 2)
+		if d, ok := tr.events[di].What.(*discarded); ok {
+			(*d)++
+		} else {
+			// disc starts at two to account for the event it is
+			// replacing, plus the next one that we are about to drop.
+			tr.disc = 2
+			if tr.recycler != nil && tr.events[di].Recyclable {
+				go tr.recycler(tr.events[di].What)
+			}
+			tr.events[di].What = &tr.disc
+		}
+		// The timestamp of the discarded meta-event should be
+		// the time of the last event it is representing.
+		tr.events[di].When = tr.events[di+1].When
+
+		if tr.recycler != nil && tr.events[di+1].Recyclable {
+			go tr.recycler(tr.events[di+1].What)
+		}
+		copy(tr.events[di+1:], tr.events[di+2:])
+		tr.events[tr.maxEvents-1] = e
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+	tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+	tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() {
+	tr.mu.Lock()
+	tr.IsError = true
+	tr.mu.Unlock()
+}
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+	tr.mu.Lock()
+	tr.recycler = f
+	tr.mu.Unlock()
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+	tr.mu.Lock()
+	tr.traceID, tr.spanID = traceID, spanID
+	tr.mu.Unlock()
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+	tr.mu.Lock()
+	// Always keep at least three events: first, discarded count, last.
+	if len(tr.events) == 0 && m > 3 {
+		tr.maxEvents = m
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+	atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		tr.mu.RLock()
+		if tr.recycler != nil {
+			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
+			go func(f func(interface{}), es []event) {
+				for _, e := range es {
+					if e.Recyclable {
+						f(e.What)
+					}
+				}
+			}(tr.recycler, tr.events)
+		}
+		tr.mu.RUnlock()
+
+		freeTrace(tr)
+	}
+}
+
+func (tr *trace) When() string {
+	return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+	tr.mu.RLock()
+	t := tr.Elapsed
+	tr.mu.RUnlock()
+
+	if t == 0 {
+		// Active trace.
+		t = time.Since(tr.Start)
+	}
+	return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+	select {
+	case tr := <-traceFreeList:
+		return tr
+	default:
+		return new(trace)
+	}
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+	if DebugUseAfterFinish {
+		return // never reuse
+	}
+	tr.reset()
+	select {
+	case traceFreeList <- tr:
+	default:
+	}
+}
+
+func elapsed(d time.Duration) string {
+	b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+	<head>
+	<title>/debug/requests</title>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#tr-status td.family {
+			padding-right: 2em;
+		}
+		table#tr-status td.active {
+			padding-right: 1em;
+		}
+		table#tr-status td.latency-first {
+			padding-left: 1em;
+		}
+		table#tr-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+		address {
+			font-size: smaller;
+			margin-top: 5em;
+		}
+	</style>
+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active {{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td {{if $empty}}class="empty"{{end}}>
+		{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+		[{{.Cond}}]
+		{{if not $empty}}</a>{{end}}
+		</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td class="latency-first">
+		<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+		</td>
+		<td>
+		<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+		</td>
+		<td>
+		<a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+		</td>
+
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+  [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+  [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+		{{/* TODO: include traceID/spanID */}}
+	</tr>
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+	</body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 000000000..6c8d97b6a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,206 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+//	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//
+//	s, _, err := k.GetStringValue("SystemRoot")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("Windows system root is %q\n", s)
+package registry
+
+import (
+	"io"
+	"runtime"
+	"syscall"
+	"time"
+)
+
+const (
+	// Registry key security and access rights.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+	// for details.
+	ALL_ACCESS         = 0xf003f
+	CREATE_LINK        = 0x00020
+	CREATE_SUB_KEY     = 0x00004
+	ENUMERATE_SUB_KEYS = 0x00008
+	EXECUTE            = 0x20019
+	NOTIFY             = 0x00010
+	QUERY_VALUE        = 0x00001
+	READ               = 0x20019
+	SET_VALUE          = 0x00002
+	WOW64_32KEY        = 0x00200
+	WOW64_64KEY        = 0x00100
+	WRITE              = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+	// Windows defines some predefined root keys that are always open.
+	// An application can use these keys as entry points to the registry.
+	// Normally these keys are used in OpenKey to open new keys,
+	// but they can also be used anywhere a Key is required.
+	CLASSES_ROOT     = Key(syscall.HKEY_CLASSES_ROOT)
+	CURRENT_USER     = Key(syscall.HKEY_CURRENT_USER)
+	LOCAL_MACHINE    = Key(syscall.HKEY_LOCAL_MACHINE)
+	USERS            = Key(syscall.HKEY_USERS)
+	CURRENT_CONFIG   = Key(syscall.HKEY_CURRENT_CONFIG)
+	PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+	return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+	p, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return 0, err
+	}
+	var subkey syscall.Handle
+	err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns local computer key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+	var err error
+	var p *uint16
+	if pcname != "" {
+		p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+		if err != nil {
+			return 0, err
+		}
+	}
+	var remoteKey syscall.Handle
+	err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+	if err != nil {
+		return 0, err
+	}
+	return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+	// RegEnumKeyEx must be called repeatedly and to completion.
+	// During this time, this goroutine cannot migrate away from
+	// its current thread. See https://golang.org/issue/49320 and
+	// https://golang.org/issue/49466.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	names := make([]string, 0)
+	// The registry key name length limit is 255 characters, as described at:
+	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+	buf := make([]uint16, 256) // plus extra room for the terminating zero
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
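+
+// Illustrative usage sketch: enumerate all subkeys by passing n <= 0,
+// mirroring os.File.Readdirnames semantics (the `SOFTWARE` path is just
+// an example):
+//
+//	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE`, registry.ENUMERATE_SUB_KEYS)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//
+//	names, err := k.ReadSubKeyNames(-1)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, name := range names {
+//		fmt.Println(name)
+//	}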
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+	var h syscall.Handle
+	var d uint32
+	err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+		0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+	if err != nil {
+		return 0, false, err
+	}
+	return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
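+
+// Illustrative usage sketch: create a key, or open it if it already
+// exists, then write a string value (the `SOFTWARE\Example` path and
+// the value name are hypothetical):
+//
+//	k, existed, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\Example`, registry.ALL_ACCESS)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//
+//	if !existed {
+//		// the key was newly created
+//	}
+//	if err := k.SetStringValue("greeting", "hello"); err != nil {
+//		log.Fatal(err)
+//	}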
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+	return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+	SubKeyCount     uint32
+	MaxSubKeyLen    uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero character
+	ValueCount      uint32
+	MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero character
+	MaxValueLen     uint32 // longest data component among the key's values, in bytes
+	lastWriteTime   syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+	return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+	var ki KeyInfo
+	err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+		&ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+		&ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+	if err != nil {
+		return nil, err
+	}
+	return &ki, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 000000000..ee74927d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,10 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+// +build generate
+
+package registry
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
new file mode 100644
index 000000000..417335123
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package registry
+
+import "syscall"
+
+const (
+	_REG_OPTION_NON_VOLATILE = 0
+
+	_REG_CREATED_NEW_KEY     = 1
+	_REG_OPENED_EXISTING_KEY = 2
+
+	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+	return procRegLoadMUIStringW.Find()
+}
+
+//sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys	regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys	regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys	regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys	regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys   regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys	regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys	expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 000000000..2789f6f18
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,387 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package registry
+
+import (
+	"errors"
+	"io"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	// Registry value types.
+	NONE                       = 0
+	SZ                         = 1
+	EXPAND_SZ                  = 2
+	BINARY                     = 3
+	DWORD                      = 4
+	DWORD_BIG_ENDIAN           = 5
+	LINK                       = 6
+	MULTI_SZ                   = 7
+	RESOURCE_LIST              = 8
+	FULL_RESOURCE_DESCRIPTOR   = 9
+	RESOURCE_REQUIREMENTS_LIST = 10
+	QWORD                      = 11
+)
+
+var (
+	// ErrShortBuffer is returned when the buffer was too short for the operation.
+	ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+	// ErrNotExist is returned when a registry key or value does not exist.
+	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+	ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills the buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value, it returns
+// an ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n only.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low-level function. If the value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return 0, 0, err
+	}
+	var pbuf *byte
+	if len(buf) > 0 {
+		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+	}
+	l := uint32(len(buf))
+	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+	if err != nil {
+		return int(l), valtype, err
+	}
+	return int(l), valtype, nil
+}
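+
+// Illustrative sketch of the two-call pattern GetValue supports: query
+// the required size with a nil buffer first, then read the data into a
+// buffer of exactly that size ("name" is a hypothetical value name):
+//
+//	n, _, err := k.GetValue("name", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	buf := make([]byte, n)
+//	n, valtype, err := k.GetValue("name", buf)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	data := buf[:n]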
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return nil, 0, err
+	}
+	var t uint32
+	n := uint32(len(buf))
+	for {
+		err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+		if err == nil {
+			return buf[:n], t, nil
+		}
+		if err != syscall.ERROR_MORE_DATA {
+			return nil, 0, err
+		}
+		if n <= uint32(len(buf)) {
+			return nil, 0, err
+		}
+		buf = make([]byte, n)
+	}
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return "", typ, err2
+	}
+	switch typ {
+	case SZ, EXPAND_SZ:
+	default:
+		return "", typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return "", typ, nil
+	}
+	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+
+	buf := make([]uint16, 1024)
+	var buflen uint32
+	var pdir *uint16
+
+	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+		// Try to resolve the string value using the system directory as
+		// a DLL search path; this assumes the string value is of the form
+		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+		// This approach works with tzres.dll but may have to be revised
+		// in the future to allow callers to provide custom search paths.
+
+		var s string
+		s, err = ExpandString("%SystemRoot%\\system32\\")
+		if err != nil {
+			return "", err
+		}
+		pdir, err = syscall.UTF16PtrFromString(s)
+		if err != nil {
+			return "", err
+		}
+
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+		if buflen <= uint32(len(buf)) {
+			break // Buffer not growing; assume a race and stop
+		}
+		buf = make([]uint16, buflen)
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	return syscall.UTF16ToString(buf), nil
+}
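+
+// Illustrative sketch: probe for RegLoadMUIString support before calling
+// GetMUIStringValue, as recommended above ("DisplayName" is just an
+// example value name):
+//
+//	if err := registry.LoadRegLoadMUIString(); err == nil {
+//		s, err := k.GetMUIStringValue("DisplayName")
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(s)
+//	}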
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
+func ExpandString(value string) (string, error) {
+	if value == "" {
+		return "", nil
+	}
+	p, err := syscall.UTF16PtrFromString(value)
+	if err != nil {
+		return "", err
+	}
+	r := make([]uint16, 100)
+	for {
+		n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+		if err != nil {
+			return "", err
+		}
+		if n <= uint32(len(r)) {
+			return syscall.UTF16ToString(r[:n]), nil
+		}
+		r = make([]uint16, n)
+	}
+}
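+
+// Illustrative sketch: read an EXPAND_SZ value and expand its
+// environment-variable references ("Path" is a hypothetical value name):
+//
+//	raw, _, err := k.GetStringValue("Path")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	expanded, err := registry.ExpandString(raw)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(expanded) // e.g. %SystemRoot% becomes C:\Windows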
+
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return nil, typ, err2
+	}
+	if typ != MULTI_SZ {
+		return nil, typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return nil, typ, nil
+	}
+	p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	if len(p) == 0 {
+		return nil, typ, nil
+	}
+	if p[len(p)-1] == 0 {
+		p = p[:len(p)-1] // remove terminating null
+	}
+	val = make([]string, 0, 5)
+	from := 0
+	for i, c := range p {
+		if c == 0 {
+			val = append(val, string(utf16.Decode(p[from:i])))
+			from = i + 1
+		}
+	}
+	return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 8))
+	if err2 != nil {
+		return 0, typ, err2
+	}
+	switch typ {
+	case DWORD:
+		if len(data) != 4 {
+			return 0, typ, errors.New("DWORD value is not 4 bytes long")
+		}
+		var val32 uint32
+		copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+		return uint64(val32), DWORD, nil
+	case QWORD:
+		if len(data) != 8 {
+			return 0, typ, errors.New("QWORD value is not 8 bytes long")
+		}
+		copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+		return val, QWORD, nil
+	default:
+		return 0, typ, ErrUnexpectedType
+	}
+}
+
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return nil, typ, err2
+	}
+	if typ != BINARY {
+		return nil, typ, ErrUnexpectedType
+	}
+	return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return err
+	}
+	if len(data) == 0 {
+		return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+	}
+	return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+	return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+	return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+	v, err := syscall.UTF16FromString(value)
+	if err != nil {
+		return err
+	}
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+	return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+	return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+	return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+	ss := ""
+	for _, s := range value {
+		for i := 0; i < len(s); i++ {
+			if s[i] == 0 {
+				return errors.New("string cannot have 0 inside")
+			}
+		}
+		ss += s + "\x00"
+	}
+	v := utf16.Encode([]rune(ss + "\x00"))
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+	return k.setValue(name, MULTI_SZ, buf)
+}
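+
+// Illustrative sketch: store a MULTI_SZ value and read it back
+// ("Servers" and its contents are hypothetical):
+//
+//	if err := k.SetStringsValue("Servers", []string{"alpha", "beta"}); err != nil {
+//		log.Fatal(err)
+//	}
+//	vals, _, err := k.GetStringsValue("Servers")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(vals) // [alpha beta]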
+
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+	return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+	return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+}
+
+// ReadValueNames returns the value names of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadValueNames(n int) ([]string, error) {
+	ki, err := k.Stat()
+	if err != nil {
+		return nil, err
+	}
+	names := make([]string, 0, ki.ValueCount)
+	buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
new file mode 100644
index 000000000..fc1835d8a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -0,0 +1,117 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procRegConnectRegistryW       = modadvapi32.NewProc("RegConnectRegistryW")
+	procRegCreateKeyExW           = modadvapi32.NewProc("RegCreateKeyExW")
+	procRegDeleteKeyW             = modadvapi32.NewProc("RegDeleteKeyW")
+	procRegDeleteValueW           = modadvapi32.NewProc("RegDeleteValueW")
+	procRegEnumValueW             = modadvapi32.NewProc("RegEnumValueW")
+	procRegLoadMUIStringW         = modadvapi32.NewProc("RegLoadMUIStringW")
+	procRegSetValueExW            = modadvapi32.NewProc("RegSetValueExW")
+	procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/google.golang.org/genproto/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
new file mode 100644
index 000000000..af72196c8
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -0,0 +1,235 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.12.2
+// source: google/api/httpbody.proto
+
+package httpbody
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Message that represents an arbitrary HTTP body. It should only be used for
+// payload formats that can't be represented as JSON, such as raw binary or
+// an HTML page.
+//
+// This message can be used both in streaming and non-streaming API methods in
+// the request as well as the response.
+//
+// It can be used as a top-level request field, which is convenient if one
+// wants to extract parameters from either the URL or HTTP template into the
+// request fields and also want access to the raw HTTP body.
+//
+// Example:
+//
+//	message GetResourceRequest {
+//	  // A unique request id.
+//	  string request_id = 1;
+//
+//	  // The raw HTTP body is bound to this field.
+//	  google.api.HttpBody http_body = 2;
+//
+//	}
+//
+//	service ResourceService {
+//	  rpc GetResource(GetResourceRequest)
+//	    returns (google.api.HttpBody);
+//	  rpc UpdateResource(google.api.HttpBody)
+//	    returns (google.protobuf.Empty);
+//
+//	}
+//
+// Example with streaming methods:
+//
+//	service CaldavService {
+//	  rpc GetCalendar(stream google.api.HttpBody)
+//	    returns (stream google.api.HttpBody);
+//	  rpc UpdateCalendar(stream google.api.HttpBody)
+//	    returns (stream google.api.HttpBody);
+//
+//	}
+//
+// Use of this type only changes how the request and response bodies are
+// handled; all other features continue to work unchanged.
+type HttpBody struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The HTTP Content-Type header value specifying the content type of the body.
+	ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+	// The HTTP request/response body as raw binary.
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	// Application specific response metadata. Must be set in the first response
+	// for streaming APIs.
+	Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
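+
+// Illustrative sketch (not generated code): populating an HttpBody to
+// return raw, non-JSON bytes from an API method (pngBytes is
+// hypothetical):
+//
+//	body := &httpbody.HttpBody{
+//		ContentType: "image/png",
+//		Data:        pngBytes,
+//	}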
+
+func (x *HttpBody) Reset() {
+	*x = HttpBody{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_httpbody_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HttpBody) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpBody) ProtoMessage() {}
+
+func (x *HttpBody) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_httpbody_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead.
+func (*HttpBody) Descriptor() ([]byte, []int) {
+	return file_google_api_httpbody_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpBody) GetContentType() string {
+	if x != nil {
+		return x.ContentType
+	}
+	return ""
+}
+
+func (x *HttpBody) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+func (x *HttpBody) GetExtensions() []*anypb.Any {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+var File_google_api_httpbody_proto protoreflect.FileDescriptor
+
+var file_google_api_httpbody_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+	0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
+	0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+	0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
+	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
+	0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
+	0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
+	0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+	0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_httpbody_proto_rawDescOnce sync.Once
+	file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc
+)
+
+func file_google_api_httpbody_proto_rawDescGZIP() []byte {
+	file_google_api_httpbody_proto_rawDescOnce.Do(func() {
+		file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData)
+	})
+	return file_google_api_httpbody_proto_rawDescData
+}
+
+var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_api_httpbody_proto_goTypes = []interface{}{
+	(*HttpBody)(nil),  // 0: google.api.HttpBody
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_api_httpbody_proto_depIdxs = []int32{
+	1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_api_httpbody_proto_init() }
+func file_google_api_httpbody_proto_init() {
+	if File_google_api_httpbody_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HttpBody); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_httpbody_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_httpbody_proto_goTypes,
+		DependencyIndexes: file_google_api_httpbody_proto_depIdxs,
+		MessageInfos:      file_google_api_httpbody_proto_msgTypes,
+	}.Build()
+	File_google_api_httpbody_proto = out.File
+	file_google_api_httpbody_proto_rawDesc = nil
+	file_google_api_httpbody_proto_goTypes = nil
+	file_google_api_httpbody_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
new file mode 100644
index 000000000..7bd161e48
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -0,0 +1,1314 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.9
+// source: google/rpc/error_details.proto
+
+package errdetails
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes the cause of the error with structured details.
+//
+// Example of an error when contacting the "pubsub.googleapis.com" API when it
+// is not enabled:
+//
+//	{ "reason": "API_DISABLED"
+//	  "domain": "googleapis.com"
+//	  "metadata": {
+//	    "resource": "projects/123",
+//	    "service": "pubsub.googleapis.com"
+//	  }
+//	}
+//
+// This response indicates that the pubsub.googleapis.com API is not enabled.
+//
+// Example of an error that is returned when attempting to create a Spanner
+// instance in a region that is out of stock:
+//
+//	{ "reason": "STOCKOUT"
+//	  "domain": "spanner.googleapis.com",
+//	  "metadata": {
+//	    "availableRegions": "us-central1,us-east2"
+//	  }
+//	}
+type ErrorInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The reason for the error. This is a constant value that identifies the
+	// proximate cause of the error. Error reasons are unique within a particular
+	// domain of errors. This should be at most 63 characters and match a
+	// regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+	// UPPER_SNAKE_CASE.
+	Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
+	// The logical grouping to which the "reason" belongs. The error domain
+	// is typically the registered service name of the tool or product that
+	// generates the error. Example: "pubsub.googleapis.com". If the error is
+	// generated by some common infrastructure, the error domain must be a
+	// globally unique value that identifies the infrastructure. For Google API
+	// infrastructure, the error domain is "googleapis.com".
+	Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
+	// Additional structured details about this error.
+	//
+	// Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
+	// length. When identifying the current value of an exceeded limit, the units
+	// should be contained in the key, not the value. For example, rather than
+	// returning {"instanceLimit": "100/request"}, return
+	// {"instanceLimitPerRequest": "100"} if the client exceeds the number of
+	// instances that can be created in a single (batch) request.
+	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ErrorInfo) Reset() {
+	*x = ErrorInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ErrorInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorInfo) ProtoMessage() {}
+
+func (x *ErrorInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead.
+func (*ErrorInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ErrorInfo) GetReason() string {
+	if x != nil {
+		return x.Reason
+	}
+	return ""
+}
+
+func (x *ErrorInfo) GetDomain() string {
+	if x != nil {
+		return x.Domain
+	}
+	return ""
+}
+
+func (x *ErrorInfo) GetMetadata() map[string]string {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
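+
+// Illustrative sketch (not part of the generated API): assuming this package
+// is imported as errdetails, a server using the google.golang.org/grpc/status
+// and codes packages might attach an ErrorInfo detail like this; the reason,
+// domain, and metadata values are hypothetical.
+//
+//	st, err := status.New(codes.FailedPrecondition, "API disabled").WithDetails(
+//		&errdetails.ErrorInfo{
+//			Reason:   "API_DISABLED",
+//			Domain:   "googleapis.com",
+//			Metadata: map[string]string{"service": "pubsub.googleapis.com"},
+//		})
+//	if err == nil {
+//		return st.Err() // the detail travels with the status
+//	}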
+
+// Describes when clients can retry a failed request. Clients could ignore the
+// recommendation here or retry when this information is missing from error
+// responses.
+//
+// It is always recommended that clients use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying.  If retried requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries have been reached or a maximum retry delay cap has been
+// reached.
+type RetryInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Clients should wait at least this long between retrying the same request.
+	RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"`
+}
+
+func (x *RetryInfo) Reset() {
+	*x = RetryInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RetryInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryInfo) ProtoMessage() {}
+
+func (x *RetryInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead.
+func (*RetryInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RetryInfo) GetRetryDelay() *durationpb.Duration {
+	if x != nil {
+		return x.RetryDelay
+	}
+	return nil
+}
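+
+// Illustrative sketch (not part of the generated API): a client that has
+// already extracted a RetryInfo detail from a failed RPC might honor it like
+// this; AsDuration is part of the durationpb API, and the exponential growth
+// of the delay on repeated failures is left to the caller.
+//
+//	if ri, ok := detail.(*errdetails.RetryInfo); ok {
+//		time.Sleep(ri.GetRetryDelay().AsDuration())
+//		// retry here, increasing the delay on each further failure
+//	}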
+
+// Describes additional debugging info.
+type DebugInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The stack trace entries indicating where the error occurred.
+	StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"`
+	// Additional debugging information provided by the server.
+	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+}
+
+func (x *DebugInfo) Reset() {
+	*x = DebugInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DebugInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugInfo) ProtoMessage() {}
+
+func (x *DebugInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead.
+func (*DebugInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DebugInfo) GetStackEntries() []string {
+	if x != nil {
+		return x.StackEntries
+	}
+	return nil
+}
+
+func (x *DebugInfo) GetDetail() string {
+	if x != nil {
+		return x.Detail
+	}
+	return ""
+}
+
+// Describes how a quota check failed.
+//
+// For example if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded.  If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryInfo and Help types for other details about handling a
+// quota failure.
+type QuotaFailure struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all quota violations.
+	Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *QuotaFailure) Reset() {
+	*x = QuotaFailure{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QuotaFailure) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure) ProtoMessage() {}
+
+func (x *QuotaFailure) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead.
+func (*QuotaFailure) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation {
+	if x != nil {
+		return x.Violations
+	}
+	return nil
+}
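+
+// Illustrative sketch (not part of the generated API): a hypothetical
+// QuotaFailure a service might return when a daily read quota is exhausted;
+// the subject and description are example values only.
+//
+//	qf := &errdetails.QuotaFailure{
+//		Violations: []*errdetails.QuotaFailure_Violation{{
+//			Subject:     "project:example-project-123",
+//			Description: "Daily limit for read operations exceeded",
+//		}},
+//	}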
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+type PreconditionFailure struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all precondition violations.
+	Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *PreconditionFailure) Reset() {
+	*x = PreconditionFailure{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PreconditionFailure) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure) ProtoMessage() {}
+
+func (x *PreconditionFailure) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation {
+	if x != nil {
+		return x.Violations
+	}
+	return nil
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+type BadRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all violations in a client request.
+	FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"`
+}
+
+func (x *BadRequest) Reset() {
+	*x = BadRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BadRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest) ProtoMessage() {}
+
+func (x *BadRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead.
+func (*BadRequest) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation {
+	if x != nil {
+		return x.FieldViolations
+	}
+	return nil
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+type RequestInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An opaque string that should only be interpreted by the service generating
+	// it. For example, it can be used to identify requests in the service's logs.
+	RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
+	// Any data that was used to serve this request. For example, an encrypted
+	// stack trace that can be sent back to the service provider for debugging.
+	ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+}
+
+func (x *RequestInfo) Reset() {
+	*x = RequestInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RequestInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestInfo) ProtoMessage() {}
+
+func (x *RequestInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead.
+func (*RequestInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RequestInfo) GetRequestId() string {
+	if x != nil {
+		return x.RequestId
+	}
+	return ""
+}
+
+func (x *RequestInfo) GetServingData() string {
+	if x != nil {
+		return x.ServingData
+	}
+	return ""
+}
+
+// Describes the resource that is being accessed.
+type ResourceInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A name for the type of resource being accessed, e.g. "sql table",
+	// "cloud storage bucket", "file", "Google calendar"; or the type URL
+	// of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+	ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+	// The name of the resource being accessed.  For example, a shared calendar
+	// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+	// error is
+	// [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+	ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+	// The owner of the resource (optional).
+	// For example, "user:<owner email>" or "project:<Google developer project
+	// id>".
+	Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+	// Describes what error is encountered when accessing this resource.
+	// For example, updating a cloud project may require the `writer` permission
+	// on the developer console project.
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *ResourceInfo) Reset() {
+	*x = ResourceInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceInfo) ProtoMessage() {}
+
+func (x *ResourceInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead.
+func (*ResourceInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ResourceInfo) GetResourceType() string {
+	if x != nil {
+		return x.ResourceType
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetResourceName() string {
+	if x != nil {
+		return x.ResourceName
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetOwner() string {
+	if x != nil {
+		return x.Owner
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// Provides links to documentation or for performing an out-of-band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+type Help struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// URL(s) pointing to additional information on handling the current error.
+	Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *Help) Reset() {
+	*x = Help{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Help) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help) ProtoMessage() {}
+
+func (x *Help) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help.ProtoReflect.Descriptor instead.
+func (*Help) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Help) GetLinks() []*Help_Link {
+	if x != nil {
+		return x.Links
+	}
+	return nil
+}
+
+// Provides a localized error message, safe to return to the user, that can
+// be attached to an RPC error.
+type LocalizedMessage struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The locale used following the specification defined at
+	// https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+// Examples are: "en-US", "fr-CH", "es-MX".
+	Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
+	// The localized error message in the above locale.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *LocalizedMessage) Reset() {
+	*x = LocalizedMessage{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LocalizedMessage) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocalizedMessage) ProtoMessage() {}
+
+func (x *LocalizedMessage) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead.
+func (*LocalizedMessage) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *LocalizedMessage) GetLocale() string {
+	if x != nil {
+		return x.Locale
+	}
+	return ""
+}
+
+func (x *LocalizedMessage) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+// A message type used to describe a single quota violation.  For example, a
+// daily quota or a custom quota that was exceeded.
+type QuotaFailure_Violation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The subject on which the quota check failed.
+	// For example, "clientip:<ip address of client>" or "project:<Google
+	// developer project id>".
+	Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the quota check failed. Clients can use this
+	// description to find out more about the quota configuration in the
+	// service's public documentation, or to find the relevant quota limit to
+	// adjust through the developer console.
+	//
+	// For example: "Service disabled" or "Daily Limit for read operations
+	// exceeded".
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *QuotaFailure_Violation) Reset() {
+	*x = QuotaFailure_Violation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QuotaFailure_Violation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure_Violation) ProtoMessage() {}
+
+func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead.
+func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *QuotaFailure_Violation) GetSubject() string {
+	if x != nil {
+		return x.Subject
+	}
+	return ""
+}
+
+func (x *QuotaFailure_Violation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// A message type used to describe a single precondition failure.
+type PreconditionFailure_Violation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type of PreconditionFailure. We recommend using a service-specific
+	// enum type to define the supported precondition violation subjects. For
+	// example, "TOS" for "Terms of Service violation".
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// The subject, relative to the type, that failed.
+	// For example, "google.com/cloud" relative to the "TOS" type would indicate
+	// which terms of service is being referenced.
+	Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the precondition failed. Developers can use this
+	// description to understand how to fix the failure.
+	//
+	// For example: "Terms of service not accepted".
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *PreconditionFailure_Violation) Reset() {
+	*x = PreconditionFailure_Violation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PreconditionFailure_Violation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure_Violation) ProtoMessage() {}
+
+func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PreconditionFailure_Violation) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *PreconditionFailure_Violation) GetSubject() string {
+	if x != nil {
+		return x.Subject
+	}
+	return ""
+}
+
+func (x *PreconditionFailure_Violation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// A message type used to describe a single bad request field.
+type BadRequest_FieldViolation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A path that leads to a field in the request body. The value will be a
+	// sequence of dot-separated identifiers that identify a protocol buffer
+	// field.
+	//
+	// Consider the following:
+	//
+	//	message CreateContactRequest {
+	//	  message EmailAddress {
+	//	    enum Type {
+	//	      TYPE_UNSPECIFIED = 0;
+	//	      HOME = 1;
+	//	      WORK = 2;
+	//	    }
+	//
+	//	    optional string email = 1;
+	//	    repeated Type type = 2;
+	//	  }
+	//
+	//	  string full_name = 1;
+	//	  repeated EmailAddress email_addresses = 2;
+	//	}
+	//
+	// In this example, in proto, `field` could take one of the following values:
+	//
+	//   - `full_name` for a violation in the `full_name` value
+	//   - `email_addresses[1].email` for a violation in the `email` field of the
+	//     first `email_addresses` message
+	//   - `email_addresses[3].type[2]` for a violation in the second `type`
+	//     value in the third `email_addresses` message.
+	//
+	// In JSON, the same values are represented as:
+	//
+	//   - `fullName` for a violation in the `fullName` value
+	//   - `emailAddresses[1].email` for a violation in the `email` field of the
+	//     first `emailAddresses` message
+	//   - `emailAddresses[3].type[2]` for a violation in the second `type`
+	//     value in the third `emailAddresses` message.
+	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	// A description of why the request element is bad.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *BadRequest_FieldViolation) Reset() {
+	*x = BadRequest_FieldViolation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BadRequest_FieldViolation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest_FieldViolation) ProtoMessage() {}
+
+func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead.
+func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *BadRequest_FieldViolation) GetField() string {
+	if x != nil {
+		return x.Field
+	}
+	return ""
+}
+
+func (x *BadRequest_FieldViolation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
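+
+// Illustrative sketch (not part of the generated API): a hypothetical
+// BadRequest using the field-path convention documented on FieldViolation
+// above; the path and description are example values only.
+//
+//	br := &errdetails.BadRequest{
+//		FieldViolations: []*errdetails.BadRequest_FieldViolation{{
+//			Field:       "email_addresses[1].email",
+//			Description: "invalid email address",
+//		}},
+//	}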
+
+// Describes a URL link.
+type Help_Link struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes what the link offers.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The URL of the link.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *Help_Link) Reset() {
+	*x = Help_Link{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Help_Link) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help_Link) ProtoMessage() {}
+
+func (x *Help_Link) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead.
+func (*Help_Link) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *Help_Link) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Help_Link) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+var File_google_rpc_error_details_proto protoreflect.FileDescriptor
+
+var file_google_rpc_error_details_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72,
+	0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a,
+	0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65,
+	0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73,
+	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65,
+	0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49,
+	0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d,
+	0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72,
+	0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64,
+	0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61,
+	0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23,
+	0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
+	0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c,
+	0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
+	0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
+	0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
+	0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
+	0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+	0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+	0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61,
+	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
+	0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
+	0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
+	0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
+	0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
+	0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
+	0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70,
+	0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c,
+	0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a,
+	0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63,
+	0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c,
+	0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
+	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
+	0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70,
+	0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72,
+	0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_rpc_error_details_proto_rawDescOnce sync.Once
+	file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc
+)
+
+func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
+	file_google_rpc_error_details_proto_rawDescOnce.Do(func() {
+		file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData)
+	})
+	return file_google_rpc_error_details_proto_rawDescData
+}
+
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_google_rpc_error_details_proto_goTypes = []interface{}{
+	(*ErrorInfo)(nil),                     // 0: google.rpc.ErrorInfo
+	(*RetryInfo)(nil),                     // 1: google.rpc.RetryInfo
+	(*DebugInfo)(nil),                     // 2: google.rpc.DebugInfo
+	(*QuotaFailure)(nil),                  // 3: google.rpc.QuotaFailure
+	(*PreconditionFailure)(nil),           // 4: google.rpc.PreconditionFailure
+	(*BadRequest)(nil),                    // 5: google.rpc.BadRequest
+	(*RequestInfo)(nil),                   // 6: google.rpc.RequestInfo
+	(*ResourceInfo)(nil),                  // 7: google.rpc.ResourceInfo
+	(*Help)(nil),                          // 8: google.rpc.Help
+	(*LocalizedMessage)(nil),              // 9: google.rpc.LocalizedMessage
+	nil,                                   // 10: google.rpc.ErrorInfo.MetadataEntry
+	(*QuotaFailure_Violation)(nil),        // 11: google.rpc.QuotaFailure.Violation
+	(*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
+	(*BadRequest_FieldViolation)(nil),     // 13: google.rpc.BadRequest.FieldViolation
+	(*Help_Link)(nil),                     // 14: google.rpc.Help.Link
+	(*durationpb.Duration)(nil),           // 15: google.protobuf.Duration
+}
+var file_google_rpc_error_details_proto_depIdxs = []int32{
+	10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
+	15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+	11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
+	12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+	13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+	14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+	6,  // [6:6] is the sub-list for method output_type
+	6,  // [6:6] is the sub-list for method input_type
+	6,  // [6:6] is the sub-list for extension type_name
+	6,  // [6:6] is the sub-list for extension extendee
+	0,  // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_error_details_proto_init() }
+func file_google_rpc_error_details_proto_init() {
+	if File_google_rpc_error_details_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ErrorInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*RetryInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DebugInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QuotaFailure); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PreconditionFailure); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BadRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*RequestInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Help); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LocalizedMessage); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QuotaFailure_Violation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PreconditionFailure_Violation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BadRequest_FieldViolation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Help_Link); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   15,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_error_details_proto_goTypes,
+		DependencyIndexes: file_google_rpc_error_details_proto_depIdxs,
+		MessageInfos:      file_google_rpc_error_details_proto_msgTypes,
+	}.Build()
+	File_google_rpc_error_details_proto = out.File
+	file_google_rpc_error_details_proto_rawDesc = nil
+	file_google_rpc_error_details_proto_goTypes = nil
+	file_google_rpc_error_details_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
new file mode 100644
index 000000000..a6b508188
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -0,0 +1,203 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v3.21.9
+// source: google/rpc/status.proto
+
+package status
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). Each `Status` message contains
+// three pieces of data: error code, error message, and error details.
+//
+// You can find out more about this error model and how to work with it in the
+// [API Design Guide](https://cloud.google.com/apis/design/errors).
+type Status struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The status code, which should be an enum value of
+	// [google.rpc.Code][google.rpc.Code].
+	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	// A developer-facing error message, which should be in English. Any
+	// user-facing error message should be localized and sent in the
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+	// by the client.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	// A list of messages that carry the error details.  There is a common set of
+	// message types for APIs to use.
+	Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+}
+
+func (x *Status) Reset() {
+	*x = Status{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_status_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Status) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_status_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+	return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Status) GetCode() int32 {
+	if x != nil {
+		return x.Code
+	}
+	return 0
+}
+
+func (x *Status) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+	if x != nil {
+		return x.Details
+	}
+	return nil
+}
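+
+// Illustrative sketch (not part of the generated API): a client might unpack
+// typed details from a gRPC error via the google.golang.org/grpc/status
+// package (distinct from this genproto package); the errdetails case shown
+// is one example, not an exhaustive list.
+//
+//	if s, ok := status.FromError(err); ok {
+//		for _, d := range s.Details() {
+//			switch info := d.(type) {
+//			case *errdetails.ErrorInfo:
+//				log.Printf("reason=%s domain=%s", info.GetReason(), info.GetDomain())
+//			}
+//		}
+//	}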
+
+var File_google_rpc_status_proto protoreflect.FileDescriptor
+
+var file_google_rpc_status_proto_rawDesc = []byte{
+	0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
+	0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
+	0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
+	0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+	0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
+	0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_rpc_status_proto_rawDescOnce sync.Once
+	file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
+)
+
+func file_google_rpc_status_proto_rawDescGZIP() []byte {
+	file_google_rpc_status_proto_rawDescOnce.Do(func() {
+		file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
+	})
+	return file_google_rpc_status_proto_rawDescData
+}
+
+var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_rpc_status_proto_goTypes = []interface{}{
+	(*Status)(nil),    // 0: google.rpc.Status
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_rpc_status_proto_depIdxs = []int32{
+	1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_status_proto_init() }
+func file_google_rpc_status_proto_init() {
+	if File_google_rpc_status_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_status_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_status_proto_goTypes,
+		DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+		MessageInfos:      file_google_rpc_status_proto_msgTypes,
+	}.Build()
+	File_google_rpc_status_proto = out.File
+	file_google_rpc_status_proto_rawDesc = nil
+	file_google_rpc_status_proto_goTypes = nil
+	file_google_rpc_status_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
new file mode 100644
index 000000000..d10ad6653
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
@@ -0,0 +1,23 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package field_mask aliases all exported identifiers in
+// package "google.golang.org/protobuf/types/known/fieldmaskpb".
+package field_mask
+
+import "google.golang.org/protobuf/types/known/fieldmaskpb"
+
+type FieldMask = fieldmaskpb.FieldMask
+
+var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto
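+
+// Illustrative sketch (not part of this alias file): because FieldMask is a
+// type alias, values are interchangeable with fieldmaskpb.FieldMask; the
+// paths below are example values only.
+//
+//	m := &field_mask.FieldMask{Paths: []string{"display_name", "email"}}
+//	var _ *fieldmaskpb.FieldMask = m // same underlying type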
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
new file mode 100644
index 000000000..e491a9e7f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..9d4213ebc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## Community Code of Conduct
+
+gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 000000000..52338d004
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+# How to contribute
+
+We definitely welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
+and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+
+If you are new to GitHub, please start by reading the [Pull Request howto](https://help.github.com/articles/about-pull-requests/).
+
+## Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single
+  concern**. We often receive PRs that try to fix several things at a time;
+  when only one fix is considered acceptable, nothing gets merged, and both
+  the author's and the reviewer's time is wasted. Create separate PRs for
+  different concerns and everyone will be happy.
+
+- The grpc package should only depend on standard Go packages and a small
+  number of exceptions. If your contribution introduces new dependencies that
+  are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you
+  need to discuss them with the gRPC-Go authors first.
+
+- For speculative changes, consider opening an issue and discussing it first. If
+  you are suggesting a behavioral or API change, consider starting with a [gRFC
+  proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made
+  and **why** it was made. Link to a github issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line
+  to address an issue. PRs with irrelevant changes won't be merged. If you do
+  want to fix formatting or style, do that in a separate PR.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments
+  that you'll need to address before merging. We expect you to be reasonably
+  responsive to those comments, otherwise the PR will be closed after 2-3 weeks
+  of inactivity.
+
+- Maintain **clean commit history** and use **meaningful commit messages**. PRs
+  with messy commit history are difficult to review and won't be merged. Use
+  `rebase -i upstream/master` to curate your commit history and/or to bring in
+  latest changes from master (but avoid rebasing in the middle of a code
+  review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we
+  can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We
+  recommend you **run tests locally** before creating your PR to catch breakages
+  early on.
+  - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
+  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
+  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
new file mode 100644
index 000000000..d6ff26747
--- /dev/null
+++ b/vendor/google.golang.org/grpc/GOVERNANCE.md
@@ -0,0 +1 @@
+This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
new file mode 100644
index 000000000..c6672c0a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -0,0 +1,28 @@
+This page lists all active maintainers of this repository. If you were a
+maintainer and would like to add your name to the Emeritus list, please send us a
+PR.
+
+See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
+for governance guidelines and how to become a maintainer.
+See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
+for general contribution guidelines.
+
+## Maintainers (in alphabetical order)
+
+- [cesarghali](https://github.com/cesarghali), Google LLC
+- [dfawley](https://github.com/dfawley), Google LLC
+- [easwars](https://github.com/easwars), Google LLC
+- [menghanl](https://github.com/menghanl), Google LLC
+- [srini100](https://github.com/srini100), Google LLC
+
+## Emeritus Maintainers (in alphabetical order)
+- [adelez](https://github.com/adelez), Google LLC
+- [canguler](https://github.com/canguler), Google LLC
+- [iamqizhao](https://github.com/iamqizhao), Google LLC
+- [jadekler](https://github.com/jadekler), Google LLC
+- [jtattermusch](https://github.com/jtattermusch), Google LLC
+- [lyuxuan](https://github.com/lyuxuan), Google LLC
+- [makmukhi](https://github.com/makmukhi), Google LLC
+- [matt-kwong](https://github.com/matt-kwong), Google LLC
+- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
+- [yongni](https://github.com/yongni), Google LLC
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
new file mode 100644
index 000000000..1f8960922
--- /dev/null
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -0,0 +1,46 @@
+all: vet test testrace
+
+build:
+	go build google.golang.org/grpc/...
+
+clean:
+	go clean -i google.golang.org/grpc/...
+
+deps:
+	GO111MODULE=on go get -d -v google.golang.org/grpc/...
+
+proto:
+	@ if ! which protoc > /dev/null; then \
+		echo "error: protoc not installed" >&2; \
+		exit 1; \
+	fi
+	go generate google.golang.org/grpc/...
+
+test:
+	go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
+
+testsubmodule:
+	cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+	cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
+
+testrace:
+	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
+
+testdeps:
+	GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
+
+vet: vetdeps
+	./vet.sh
+
+vetdeps:
+	./vet.sh -install
+
+.PHONY: \
+	all \
+	build \
+	clean \
+	proto \
+	test \
+	testrace \
+	vet \
+	vetdeps
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 000000000..530197749
--- /dev/null
+++ b/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
new file mode 100644
index 000000000..0e6ae69a5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/README.md
@@ -0,0 +1,141 @@
+# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
+[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
+[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
+
+## Prerequisites
+
+- **[Go][]**: any one of the **three latest major** [releases][go-releases].
+
+## Installation
+
+With [Go module][] support (Go 1.11+), simply add the following import
+
+```go
+import "google.golang.org/grpc"
+```
+
+to your code, and then `go [build|run|test]` will automatically fetch the
+necessary dependencies.
+
+Otherwise, to install the `grpc-go` package, run the following command:
+
+```console
+$ go get -u google.golang.org/grpc
+```
+
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
+
+## Learn more
+
+- [Go gRPC docs][], which include a [quick start][] and [API
+  reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
+
+## FAQ
+
+### I/O Timeout Errors
+
+The `golang.org` domain may be blocked in some countries. `go get` usually
+produces an error like the following when this happens:
+
+```console
+$ go get -u google.golang.org/grpc
+package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
+```
+
+To build Go code, there are several options:
+
+- Set up a VPN and access google.golang.org through that.
+
+- Without Go module support: `git clone` the repo manually:
+
+  ```sh
+  git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
+  ```
+
+  You will need to do the same for all of grpc's dependencies in `golang.org`,
+  e.g. `golang.org/x/net`.
+
+- With Go module support: it is possible to use the `replace` feature of `go
+  mod` to create aliases for golang.org packages.  In your project's directory:
+
+  ```sh
+  go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
+  go mod tidy
+  go mod vendor
+  go build -mod=vendor
+  ```
+
+  Again, this will need to be done for all transitive dependencies hosted on
+  golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
+
+### Compiling error, undefined: grpc.SupportPackageIsVersion
+
+#### If you are using Go modules:
+
+Ensure your gRPC-Go version is `require`d at the appropriate version in
+the same module containing the generated `.pb.go` files.  For example,
+`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
+
+```go
+module <your module name>
+
+require (
+    google.golang.org/grpc v1.27.0
+)
+```
+
+#### If you are *not* using Go modules:
+
+Update the `proto` package, gRPC package, and rebuild the `.proto` files:
+
+```sh
+go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
+go get -u google.golang.org/grpc
+protoc --go_out=plugins=grpc:. *.proto
+```
+
+### How to turn on logging
+
+The default logger is controlled by environment variables. Turn everything on
+like this:
+
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+
+This error means the connection the RPC is using was closed, and there are many
+possible reasons, including:
+ 1. mis-configured transport credentials: the connection failed during handshaking
+ 1. bytes disrupted, possibly by a proxy in between
+ 1. server shutdown
+ 1. Keepalive parameters caused connection shutdown, for example if you have configured
+    your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
+    If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
+    to allow longer RPC calls to finish.
+
+It can be tricky to debug this because the error happens on the client side but
+the root cause of the connection being closed is on the server side. Turn on
+logging on __both client and server__, and see if there are any transport
+errors.
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 000000000..be6e10870
--- /dev/null
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 000000000..02f5dc531
--- /dev/null
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,101 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package attributes
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs.  Keys must be hashable, and users should define their own
+// types for keys.  Values should not be modified after they are added to an
+// Attributes or if they were received from one.  If values implement 'Equal(o
+// interface{}) bool', it will be called by (*Attributes).Equal to determine
+// whether two values with the same key should be considered equal.
+type Attributes struct {
+	m map[interface{}]interface{}
+}
+
+// New returns a new Attributes containing the key/value pair.
+func New(key, value interface{}) *Attributes {
+	return &Attributes{m: map[interface{}]interface{}{key: value}}
+}
+
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair.  If the same key appears multiple times, the
+// last value overwrites all previous values for that key.  To remove an
+// existing key, use a nil value.  value should not be modified later.
+func (a *Attributes) WithValue(key, value interface{}) *Attributes {
+	if a == nil {
+		return New(key, value)
+	}
+	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
+	for k, v := range a.m {
+		n.m[k] = v
+	}
+	n.m[key] = value
+	return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key.  The returned value should not be modified.
+func (a *Attributes) Value(key interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	return a.m[key]
+}
+
+// Equal returns whether a and o are equivalent.  If 'Equal(o interface{})
+// bool' is implemented for a value in the attributes, it is called to
+// determine if the value matches the one stored in the other attributes.  If
+// Equal is not implemented, standard equality is used to determine if the two
+// values are equal. Note that some types (e.g. maps) aren't comparable by
+// default, so they must be wrapped in a struct, or in an alias type, with Equal
+// defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+	if a == nil && o == nil {
+		return true
+	}
+	if a == nil || o == nil {
+		return false
+	}
+	if len(a.m) != len(o.m) {
+		return false
+	}
+	for k, v := range a.m {
+		ov, ok := o.m[k]
+		if !ok {
+			// o missing element of a
+			return false
+		}
+		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
+			if !eq.Equal(ov) {
+				return false
+			}
+		} else if v != ov {
+			// Fall back to a standard equality check if Equal is unimplemented.
+			return false
+		}
+	}
+	return true
+}
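
To make the immutable-update pattern above concrete, here is a minimal usage sketch; the `pingKey` type is a hypothetical key type of the kind the package documentation recommends defining:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// pingKey is a hypothetical package-private key type, per the package's
// advice that users define their own types for keys.
type pingKey struct{}

func main() {
	a := attributes.New(pingKey{}, "v1")
	// WithValue returns a new Attributes; a itself is never mutated.
	b := a.WithValue(pingKey{}, "v2")

	fmt.Println(a.Value(pingKey{})) // v1
	fmt.Println(b.Value(pingKey{})) // v2
	fmt.Println(a.Equal(b))         // false: the values for pingKey{} differ
}
```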
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 000000000..29475e31c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
+
+package grpc
+
+import (
+	"time"
+
+	"google.golang.org/grpc/backoff"
+)
+
+// DefaultBackoffConfig uses values specified for backoff in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
+var DefaultBackoffConfig = BackoffConfig{
+	MaxDelay: 120 * time.Second,
+}
+
+// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
+type BackoffConfig struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// ConnectParams defines the parameters for connecting and retrying. Users are
+// encouraged to use this instead of the BackoffConfig type defined above. See
+// here for more details:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ConnectParams struct {
+	// Backoff specifies the configuration options for connection backoff.
+	Backoff backoff.Config
+	// MinConnectTimeout is the minimum amount of time we are willing to give a
+	// connection to complete.
+	MinConnectTimeout time.Duration
+}
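
A minimal sketch of how `ConnectParams` is typically consumed, via the `grpc.WithConnectParams` dial option; the target address is a placeholder and the insecure credentials are for illustration only:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Cap reconnect backoff at 30s and give each attempt at least 10s,
	// instead of using the deprecated BackoffConfig/DefaultBackoffConfig.
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithInsecure(), // plaintext, for the sketch only
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   30 * time.Second,
			},
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```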
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
new file mode 100644
index 000000000..0787d0b50
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
+package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+	// BaseDelay is the amount of time to backoff after the first failure.
+	BaseDelay time.Duration
+	// Multiplier is the factor with which to multiply backoffs after a
+	// failed retry. Should ideally be greater than 1.
+	Multiplier float64
+	// Jitter is the factor with which backoffs are randomized.
+	Jitter float64
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
+var DefaultConfig = Config{
+	BaseDelay:  1.0 * time.Second,
+	Multiplier: 1.6,
+	Jitter:     0.2,
+	MaxDelay:   120 * time.Second,
+}
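
For the subset-override case the comment above describes, a caller can copy `DefaultConfig` and change only the field of interest, since `Config` is a plain value type; a small sketch:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/backoff"
)

func main() {
	// Assigning DefaultConfig copies it, so the package-level default is
	// untouched; only MaxDelay deviates from the documented defaults.
	cfg := backoff.DefaultConfig
	cfg.MaxDelay = 30 * time.Second
	fmt.Printf("%+v\n", cfg)
}
```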
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
new file mode 100644
index 000000000..09d61dd1b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -0,0 +1,404 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package balancer defines APIs for load balancing in gRPC.
+// All APIs in this package are experimental.
+package balancer
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/channelz"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from name to balancer builder.
+	m = make(map[string]Builder)
+)
+
+// Register registers the balancer builder to the balancer map. b.Name
+// (lowercased) will be used as the name registered with this builder.  If the
+// Builder implements ConfigParser, ParseConfig will be called when new service
+// configs are received by the resolver, and the result will be provided to the
+// Balancer in UpdateClientConnState.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Balancers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[strings.ToLower(b.Name())] = b
+}
+
+// unregisterForTesting deletes the balancer with the given name from the
+// balancer map.
+//
+// This function is not thread-safe.
+func unregisterForTesting(name string) {
+	delete(m, name)
+}
+
+func init() {
+	internal.BalancerUnregister = unregisterForTesting
+}
+
+// Get returns the resolver builder registered with the given name.
+// Note that the compare is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+	if b, ok := m[strings.ToLower(name)]; ok {
+		return b
+	}
+	return nil
+}
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// Each SubConn contains a list of addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger the
+// connecting, Balancers must call Connect.  If a connection re-enters IDLE,
+// Balancers must call Connect again to trigger a new connection attempt.
+//
+// gRPC will try to connect to the addresses in sequence, and stop trying the
+// remainder once the first connection is successful. If an attempt to connect
+// to all addresses encounters an error, the SubConn will enter
+// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
+//
+// Once established, if a connection is lost, the SubConn will transition
+// directly to IDLE.
+//
+// This interface is to be implemented by gRPC. Users should not need their own
+// implementation of this interface. For situations like testing, any
+// implementations should embed this interface. This allows gRPC to add new
+// methods to this interface.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: This method is now part of the ClientConn interface and will
+	// eventually be removed from here.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+	// GetOrBuildProducer returns a reference to the existing Producer for this
+	// ProducerBuilder in this SubConn, or, if one does not currently exist,
+	// creates a new one and returns it.  Returns a close function which must
+	// be called when the Producer is no longer needed.
+	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+}
+
+// NewSubConnOptions contains options to create new SubConn.
+type NewSubConnOptions struct {
+	// CredsBundle is the credentials bundle that will be used in the created
+	// SubConn. If it's nil, the original creds from grpc DialOptions will be
+	// used.
+	//
+	// Deprecated: Use the Attributes field in resolver.Address to pass
+	// arbitrary data to the credential handshaker.
+	CredsBundle credentials.Bundle
+	// HealthCheckEnabled indicates whether the health check service should be
+	// enabled on this SubConn.
+	HealthCheckEnabled bool
+}
+
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+	// State contains the connectivity state of the balancer, which is used to
+	// determine the state of the ClientConn.
+	ConnectivityState connectivity.State
+	// Picker is used to choose connections (SubConns) for RPCs.
+	Picker Picker
+}
+
+// ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like testing,
+// the new implementation should embed this interface. This allows gRPC to
+// add new methods to this interface.
+type ClientConn interface {
+	// NewSubConn is called by balancer to create a new SubConn.
+	// It doesn't block and wait for the connections to be established.
+	// Behaviors of the SubConn can be controlled by options.
+	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
+	// RemoveSubConn removes the SubConn from ClientConn.
+	// The SubConn will be shutdown.
+	RemoveSubConn(SubConn)
+	// UpdateAddresses updates the addresses used in the passed in SubConn.
+	// gRPC checks if the currently connected address is still in the new list.
+	// If so, the connection will be kept. Else, the connection will be
+	// gracefully closed, and a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	UpdateAddresses(SubConn, []resolver.Address)
+
+	// UpdateState notifies gRPC that the balancer's internal state has
+	// changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call
+	// Pick on the new Picker to pick new SubConns.
+	UpdateState(State)
+
+	// ResolveNow is called by the balancer to notify gRPC to do name resolution.
+	ResolveNow(resolver.ResolveNowOptions)
+
+	// Target returns the dial target for this ClientConn.
+	//
+	// Deprecated: Use the Target field in the BuildOptions instead.
+	Target() string
+}
+
+// BuildOptions contains additional information for Build.
+type BuildOptions struct {
+	// DialCreds is the transport credentials to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer to use when communicating with a remote load
+	// balancer server. Balancer implementations which do not communicate with a
+	// remote load balancer server can ignore this field.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// Authority is the server name to use as part of the authentication
+	// handshake when communicating with a remote load balancer server. Balancer
+	// implementations which do not communicate with a remote load balancer
+	// server can ignore this field.
+	Authority string
+	// ChannelzParentID is the parent ClientConn's channelz ID.
+	ChannelzParentID *channelz.Identifier
+	// CustomUserAgent is the custom user agent set on the parent ClientConn.
+	// The balancer should set the same custom user agent if it creates a
+	// ClientConn.
+	CustomUserAgent string
+	// Target contains the parsed address info of the dial target. It is the
+	// same resolver.Target as passed to the resolver. See the documentation for
+	// the resolver.Target type for details about what it contains.
+	Target resolver.Target
+}
+
+// Builder creates a balancer.
+type Builder interface {
+	// Build creates a new balancer with the ClientConn.
+	Build(cc ClientConn, opts BuildOptions) Balancer
+	// Name returns the name of balancers built by this builder.
+	// It will be used to pick balancers (for example in service config).
+	Name() string
+}
+
+// ConfigParser parses load balancer configs.
+type ConfigParser interface {
+	// ParseConfig parses the JSON load balancer config provided into an
+	// internal form or returns an error if the config is invalid.  For future
+	// compatibility reasons, unknown fields in the config should be ignored.
+	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
+}
+
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
+	// FullMethodName is the method name that NewClientStream() is called
+	// with. The canonical format is /service/Method.
+	FullMethodName string
+	// Ctx is the RPC's context, and may contain relevant RPC-level information
+	// like the outgoing header metadata.
+	Ctx context.Context
+}
+
+// DoneInfo contains additional information for done.
+type DoneInfo struct {
+	// Err is the rpc error the RPC finished with. It could be nil.
+	Err error
+	// Trailer contains the metadata from the RPC's trailer, if present.
+	Trailer metadata.MD
+	// BytesSent indicates if any bytes have been sent to the server.
+	BytesSent bool
+	// BytesReceived indicates if any bytes have been received from the server.
+	BytesReceived bool
+	// ServerLoad is the load received from server. It's usually sent as part of
+	// trailing metadata.
+	//
+	// The only supported type now is *orca_v3.LoadReport.
+	ServerLoad interface{}
+}
+
+var (
+	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
+	// gRPC will block the RPC until a new picker is available via UpdateState().
+	ErrNoSubConnAvailable = errors.New("no SubConn is available")
+	// ErrTransientFailure indicates all SubConns are in TransientFailure.
+	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+	//
+	// Deprecated: return an appropriate error based on the last resolution or
+	// connection attempt instead.  The behavior is the same for any non-gRPC
+	// status error.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+)
+
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+	// SubConn is the connection to use for this pick, if its state is Ready.
+	// If the state is not Ready, gRPC will block the RPC until a new Picker is
+	// provided by the balancer (using ClientConn.UpdateState).  The SubConn
+	// must be one returned by ClientConn.NewSubConn.
+	SubConn SubConn
+
+	// Done is called when the RPC is completed.  If the SubConn is not ready,
+	// this will be called with a nil parameter.  If the SubConn is not a valid
+	// type, Done may not be called.  May be nil if the balancer does not wish
+	// to be notified when the RPC completes.
+	Done func(DoneInfo)
+
+	// Metadata provides a way for LB policies to inject arbitrary per-call
+	// metadata. Any metadata returned here will be merged with existing
+	// metadata added by the client application.
+	//
+	// LB policies with child policies are responsible for propagating metadata
+	// injected by their children to the ClientConn, as part of Pick().
+	Metatada metadata.MD
+}
+
+// TransientFailureError returns e.  It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
+type Picker interface {
+	// Pick returns the connection to use for this RPC and related information.
+	//
+	// Pick should not block.  If the balancer needs to do I/O or any blocking
+	// or time-consuming work to service this call, it should return
+	// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+	// the Picker is updated (using ClientConn.UpdateState).
+	//
+	// If an error is returned:
+	//
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+	//   Picker is provided by the balancer (using ClientConn.UpdateState).
+	//
+	// - If the error is a status error (implemented by the grpc/status
+	//   package), gRPC will terminate the RPC with the code and message
+	//   provided.
+	//
+	// - For all other errors, wait for ready RPCs will wait, but non-wait for
+	//   ready RPCs will be terminated with this error's Error() string and
+	//   status code Unavailable.
+	Pick(info PickInfo) (PickResult, error)
+}
+
+// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
+// the connectivity states.
+//
+// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
+//
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine.  There's no
+// guarantee on picker.Pick; it may be called anytime.
+type Balancer interface {
+	// UpdateClientConnState is called by gRPC when the state of the ClientConn
+	// changes.  If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error.  Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
+	UpdateSubConnState(SubConn, SubConnState)
+	// Close closes the balancer. The balancer is not required to call
+	// ClientConn.RemoveSubConn for its existing SubConns.
+	Close()
+}
+
+// ExitIdler is an optional interface for balancers to implement.  If
+// implemented, ExitIdle will be called when ClientConn.Connect is called, if
+// the ClientConn is idle.  If unimplemented, ClientConn.Connect will cause
+// all SubConns to connect.
+//
+// Notice: it will be required for all balancers to implement this in a future
+// release.
+type ExitIdler interface {
+	// ExitIdle instructs the LB policy to reconnect to backends / exit the
+	// IDLE state, if appropriate and possible.  Note that SubConns that enter
+	// the IDLE state will not reconnect until SubConn.Connect is called.
+	ExitIdle()
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
+	ConnectivityState connectivity.State
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed.  Otherwise, it is nil.
+	ConnectionError error
+}
+
+// ClientConnState describes the state of a ClientConn relevant to the
+// balancer.
+type ClientConnState struct {
+	ResolverState resolver.State
+	// The parsed load balancing configuration returned by the builder's
+	// ParseConfig method, if implemented.
+	BalancerConfig serviceconfig.LoadBalancingConfig
+}
+
+// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
+// problem with the provided name resolver data.
+var ErrBadResolverState = errors.New("bad resolver state")
+
+// A ProducerBuilder is a simple constructor for a Producer.  It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+	// Build creates a Producer.  The first parameter is always a
+	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+	// associated SubConn), but is declared as interface{} to avoid a
+	// dependency cycle.  Should also return a close function that will be
+	// called when all references to the Producer have been given up.
+	Build(grpcClientConnInterface interface{}) (p Producer, close func())
+}
+
+// A Producer is a type shared among potentially many consumers.  It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer interface {
+}
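
Tying the Builder, Balancer, and Register pieces together, a hedged sketch of the init-time registration pattern; `stubBuilder`, `stubBalancer`, and the `"stub"` name are hypothetical, and the balancer body is deliberately a no-op:

```go
package stub

import "google.golang.org/grpc/balancer"

type stubBuilder struct{}

// Name (lowercased) is the string by which service configs select this
// balancer.
func (stubBuilder) Name() string { return "stub" }

func (stubBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return &stubBalancer{cc: cc}
}

// stubBalancer satisfies the Balancer interface but does nothing useful.
type stubBalancer struct{ cc balancer.ClientConn }

func (b *stubBalancer) UpdateClientConnState(balancer.ClientConnState) error       { return nil }
func (b *stubBalancer) ResolverError(error)                                        {}
func (b *stubBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {}
func (b *stubBalancer) Close()                                                     {}

// Register must run at init time, as the Register documentation requires.
func init() { balancer.Register(stubBuilder{}) }
```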
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
new file mode 100644
index 000000000..3929c26d3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -0,0 +1,254 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package base
+
+import (
+	"errors"
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+var logger = grpclog.Component("balancer")
+
+type baseBuilder struct {
+	name          string
+	pickerBuilder PickerBuilder
+	config        Config
+}
+
+func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	bal := &baseBalancer{
+		cc:            cc,
+		pickerBuilder: bb.pickerBuilder,
+
+		subConns: resolver.NewAddressMap(),
+		scStates: make(map[balancer.SubConn]connectivity.State),
+		csEvltr:  &balancer.ConnectivityStateEvaluator{},
+		config:   bb.config,
+		state:    connectivity.Connecting,
+	}
+	// Initialize picker to a picker that always returns
+	// ErrNoSubConnAvailable, because when the state of a SubConn changes, we
+	// may call UpdateState with this picker.
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+	return bal
+}
+
+func (bb *baseBuilder) Name() string {
+	return bb.name
+}
+
+type baseBalancer struct {
+	cc            balancer.ClientConn
+	pickerBuilder PickerBuilder
+
+	csEvltr *balancer.ConnectivityStateEvaluator
+	state   connectivity.State
+
+	subConns *resolver.AddressMap
+	scStates map[balancer.SubConn]connectivity.State
+	picker   balancer.Picker
+	config   Config
+
+	resolverErr error // the last error reported by the resolver; cleared on successful resolution
+	connErr     error // the last connection error; cleared upon leaving TransientFailure
+}
+
+func (b *baseBalancer) ResolverError(err error) {
+	b.resolverErr = err
+	if b.subConns.Len() == 0 {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
+}
+
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+	// TODO: handle s.ResolverState.ServiceConfig?
+	if logger.V(2) {
+		logger.Info("base.baseBalancer: got new ClientConn state: ", s)
+	}
+	// Successful resolution; clear resolver error and ensure we return nil.
+	b.resolverErr = nil
+	// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
+	addrsSet := resolver.NewAddressMap()
+	for _, a := range s.ResolverState.Addresses {
+		addrsSet.Set(a, nil)
+		if _, ok := b.subConns.Get(a); !ok {
+			// a is a new address (not existing in b.subConns).
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
+			if err != nil {
+				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				continue
+			}
+			b.subConns.Set(a, sc)
+			b.scStates[sc] = connectivity.Idle
+			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
+			sc.Connect()
+		}
+	}
+	for _, a := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(a)
+		sc := sci.(balancer.SubConn)
+		// a was removed by resolver.
+		if _, ok := addrsSet.Get(a); !ok {
+			b.cc.RemoveSubConn(sc)
+			b.subConns.Delete(a)
+			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+			// The entry will be deleted in UpdateSubConnState.
+		}
+	}
+	// If resolver state contains no addresses, return an error so ClientConn
+	// will trigger re-resolve. Also record this as a resolver error, so when
+	// the overall state turns transient failure, the error message will include
+	// the zero-address information.
+	if len(s.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+	return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error.  Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+	// connErr must always be non-nil unless there are no SubConns, in which
+	// case resolverErr must be non-nil.
+	if b.connErr == nil {
+		return fmt.Errorf("last resolver error: %v", b.resolverErr)
+	}
+	if b.resolverErr == nil {
+		return fmt.Errorf("last connection error: %v", b.connErr)
+	}
+	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker is
+//   - errPicker if the balancer is in TransientFailure,
+//   - built by the pickerBuilder with all READY SubConns otherwise.
+func (b *baseBalancer) regeneratePicker() {
+	if b.state == connectivity.TransientFailure {
+		b.picker = NewErrPicker(b.mergeErrors())
+		return
+	}
+	readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+	// Filter out all ready SCs from full subConn map.
+	for _, addr := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(addr)
+		sc := sci.(balancer.SubConn)
+		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+			readySCs[sc] = SubConnInfo{Address: addr}
+		}
+	}
+	b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
+}
+
+func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	s := state.ConnectivityState
+	if logger.V(2) {
+		logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	}
+	oldS, ok := b.scStates[sc]
+	if !ok {
+		if logger.V(2) {
+			logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		}
+		return
+	}
+	if oldS == connectivity.TransientFailure &&
+		(s == connectivity.Connecting || s == connectivity.Idle) {
+		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
+		// CONNECTING transitions to prevent the aggregated state from being
+		// always CONNECTING when many backends exist but are all down.
+		if s == connectivity.Idle {
+			sc.Connect()
+		}
+		return
+	}
+	b.scStates[sc] = s
+	switch s {
+	case connectivity.Idle:
+		sc.Connect()
+	case connectivity.Shutdown:
+		// When an address was removed by the resolver, b called RemoveSubConn but
+		// kept the sc's state in scStates. Remove state for this sc here.
+		delete(b.scStates, sc)
+	case connectivity.TransientFailure:
+		// Save error to be reported via picker.
+		b.connErr = state.ConnectionError
+	}
+
+	b.state = b.csEvltr.RecordTransition(oldS, s)
+
+	// Regenerate picker when one of the following happens:
+	//  - this sc entered or left ready
+	//  - the aggregated state of balancer is TransientFailure
+	//    (may need to update error message)
+	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
+		b.state == connectivity.TransientFailure {
+		b.regeneratePicker()
+	}
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+}
+
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (b *baseBalancer) Close() {
+}
+
+// ExitIdle is a nop because the base balancer attempts to stay connected to
+// all SubConns at all times.
+func (b *baseBalancer) ExitIdle() {
+}
+
+// NewErrPicker returns a Picker that always returns err on Pick().
+func NewErrPicker(err error) balancer.Picker {
+	return &errPicker{err: err}
+}
+
+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
+type errPicker struct {
+	err error // Pick() always returns this err.
+}
+
+func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, p.err
+}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
new file mode 100644
index 000000000..e31d76e33
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -0,0 +1,71 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package base defines a balancer base that can be used to build balancers with
+// different picking algorithms.
+//
+// The base balancer creates a new SubConn for each resolved address. The
+// provided picker will only be notified about READY SubConns.
+//
+// This package is the base of the round_robin balancer; its purpose is to be
+// used to build round_robin-like balancers with complex picking algorithms.
+// Balancers with more complicated logic should try to implement a balancer
+// builder from scratch.
+//
+// All APIs in this package are experimental.
+package base
+
+import (
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// PickerBuilder creates balancer.Picker.
+type PickerBuilder interface {
+	// Build returns a picker that will be used by gRPC to pick a SubConn.
+	Build(info PickerBuildInfo) balancer.Picker
+}
+
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+	// ReadySCs is a map from all ready SubConns to the Addresses used to
+	// create them.
+	ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+	Address resolver.Address // the address used to create this SubConn
+}
+
+// Config contains the config info about the base balancer builder.
+type Config struct {
+	// HealthCheck indicates whether health checking should be enabled for this specific balancer.
+	HealthCheck bool
+}
+
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
+	return &baseBuilder{
+		name:          name,
+		pickerBuilder: pb,
+		config:        config,
+	}
+}
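+
+// Illustrative sketch (not part of the upstream gRPC source): a minimal
+// custom balancer built on this package. The PickerBuilder is only handed
+// READY SubConns and returns a picker over them; the names firstReadyBuilder,
+// firstReadyPicker and "first_ready" are assumptions made for this example.
+//
+//	type firstReadyBuilder struct{}
+//
+//	func (firstReadyBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+//		for sc := range info.ReadySCs {
+//			return &firstReadyPicker{sc: sc}
+//		}
+//		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+//	}
+//
+//	type firstReadyPicker struct{ sc balancer.SubConn }
+//
+//	func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+//		return balancer.PickResult{SubConn: p.sc}, nil
+//	}
+//
+//	func init() {
+//		balancer.Register(base.NewBalancerBuilder("first_ready", firstReadyBuilder{}, base.Config{}))
+//	}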
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
new file mode 100644
index 000000000..c33413581
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transient failure state.
+	numIdle             uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records a state change happening in the given SubConn and,
+// based on that, evaluates what the aggregated state should be:
+//
+//   - If at least one SubConn is Ready, the aggregated state is Ready;
+//   - Else if at least one SubConn is Connecting, the aggregated state is Connecting;
+//   - Else if at least one SubConn is Idle, the aggregated state is Idle;
+//   - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is TransientFailure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState (via uint64 wraparound) and +1 for newState.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		case connectivity.Idle:
+			cse.numIdle += updateVal
+		}
+	}
+	return cse.CurrentState()
+}
+
+// CurrentState returns the current aggregated connectivity state by evaluating the counters.
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	if cse.numIdle > 0 {
+		return connectivity.Idle
+	}
+	return connectivity.TransientFailure
+}
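+
+// Illustrative sketch (not part of the upstream gRPC source): how transitions
+// feed through the evaluator from a balancer's point of view. Shutdown is not
+// counted, so a brand-new SubConn can be registered as a transition from
+// Shutdown to Idle. Each call returns the new aggregated state:
+//
+//	var cse balancer.ConnectivityStateEvaluator
+//	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)          // Idle
+//	cse.RecordTransition(connectivity.Idle, connectivity.Connecting)        // Connecting
+//	cse.RecordTransition(connectivity.Connecting, connectivity.Ready)       // Ready
+//	cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure) // TransientFailure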
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 000000000..4ecfa1c21
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+	// BalancerAddresses contains the remote load balancer address(es).  If
+	// set, overrides any resolver-provided addresses with Type of GRPCLB.
+	BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s.  s's
+// data should not be mutated after calling Set.
+func Set(state resolver.State, s *State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(key, s)
+	return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
+func Get(state resolver.State) *State {
+	s, _ := state.Attributes.Value(key).(*State)
+	return s
+}
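+
+// Illustrative sketch (not part of the upstream gRPC source): a resolver
+// attaching balancer addresses for grpclb, and the balancer reading them
+// back. backends and lbAddrs are assumed []resolver.Address values.
+//
+//	// Resolver side:
+//	rs := state.Set(resolver.State{Addresses: backends},
+//		&state.State{BalancerAddresses: lbAddrs})
+//
+//	// grpclb side:
+//	if s := state.Get(rs); s != nil {
+//		_ = s.BalancerAddresses
+//	}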
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
new file mode 100644
index 000000000..f7031ad22
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package roundrobin defines a roundrobin balancer. The roundrobin balancer is
+// installed as one of the default balancers in gRPC, so users don't need to
+// explicitly install this balancer.
+package roundrobin
+
+import (
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
+var logger = grpclog.Component("roundrobin")
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+}
+
+func init() {
+	balancer.Register(newBuilder())
+}
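+
+// Illustrative sketch (not part of the upstream gRPC source): because the
+// balancer is registered by default, a client can select it through the
+// service config. target is an assumed address string.
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`))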
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+	logger.Infof("roundrobinPicker: Build called with info: %v", info)
+	if len(info.ReadySCs) == 0 {
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+	}
+	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
+	for sc := range info.ReadySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
+		// Start at a random index, as the same RR balancer rebuilds a new
+		// picker when SubConn states change, and we don't want to apply excess
+		// load to the first server in the list.
+		next: uint32(grpcrand.Intn(len(scs))),
+	}
+}
+
+type rrPicker struct {
+	// subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Pick() will do a round-robin
+	// selection from it and return the selected SubConn.
+	subConns []balancer.SubConn
+	next     uint32
+}
+
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	subConnsLen := uint32(len(p.subConns))
+	nextIndex := atomic.AddUint32(&p.next, 1)
+
+	sc := p.subConns[nextIndex%subConnsLen]
+	return balancer.PickResult{SubConn: sc}, nil
+}
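+
+// Illustrative sketch (not part of the upstream gRPC source): with three
+// SubConns in scs (an assumed []balancer.SubConn) and a starting offset of 1,
+// successive Pick calls atomically advance next and select index next % 3:
+//
+//	p := &rrPicker{subConns: scs, next: 1}
+//	res, _ := p.Pick(balancer.PickInfo{}) // next becomes 2, returns scs[2]
+//	res, _ = p.Pick(balancer.PickInfo{})  // next becomes 3, returns scs[0]
+//	res, _ = p.Pick(balancer.PickInfo{})  // next becomes 4, returns scs[1]
+//	_ = res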
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 000000000..0359956d3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,481 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	"google.golang.org/grpc/internal/buffer"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
+)
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen synchronously and in order.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+	cc *ClientConn
+
+	// Since these fields are accessed only from handleXxx() methods which are
+	// synchronized by the watcher goroutine, we do not need a mutex to protect
+	// these fields.
+	balancer        *gracefulswitch.Balancer
+	curBalancerName string
+
+	updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
+	resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
+	closed   *grpcsync.Event   // Indicates if close has been called.
+	done     *grpcsync.Event   // Indicates if close has completed its work.
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
+// is not created until the switchTo() method is invoked.
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ccb := &ccBalancerWrapper{
+		cc:       cc,
+		updateCh: buffer.NewUnbounded(),
+		resultCh: buffer.NewUnbounded(),
+		closed:   grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+	}
+	go ccb.watcher()
+	ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
+	return ccb
+}
+
+// The following xxxUpdate structs wrap the arguments received as part of the
+// corresponding update. The watcher goroutine uses the 'type' of the update to
+// invoke the appropriate handler routine to handle the update.
+
+type ccStateUpdate struct {
+	ccs *balancer.ClientConnState
+}
+
+type scStateUpdate struct {
+	sc    balancer.SubConn
+	state connectivity.State
+	err   error
+}
+
+type exitIdleUpdate struct{}
+
+type resolverErrorUpdate struct {
+	err error
+}
+
+type switchToUpdate struct {
+	name string
+}
+
+type subConnUpdate struct {
+	acbw *acBalancerWrapper
+}
+
+// watcher is a long-running goroutine which reads updates from a channel and
+// invokes the corresponding methods on the underlying balancer. It ensures
+// that these methods are invoked synchronously and in the order in which the
+// updates were received.
+func (ccb *ccBalancerWrapper) watcher() {
+	for {
+		select {
+		case u := <-ccb.updateCh.Get():
+			ccb.updateCh.Load()
+			if ccb.closed.HasFired() {
+				break
+			}
+			switch update := u.(type) {
+			case *ccStateUpdate:
+				ccb.handleClientConnStateChange(update.ccs)
+			case *scStateUpdate:
+				ccb.handleSubConnStateChange(update)
+			case *exitIdleUpdate:
+				ccb.handleExitIdle()
+			case *resolverErrorUpdate:
+				ccb.handleResolverError(update.err)
+			case *switchToUpdate:
+				ccb.handleSwitchTo(update.name)
+			case *subConnUpdate:
+				ccb.handleRemoveSubConn(update.acbw)
+			default:
+				logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
+			}
+		case <-ccb.closed.Done():
+		}
+
+		if ccb.closed.HasFired() {
+			ccb.handleClose()
+			return
+		}
+	}
+}
+
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+//
+// Unlike other methods invoked by grpc to push updates to the underlying
+// balancer, this method cannot simply push the update onto the update channel
+// and return. It needs to return the error returned by the underlying balancer
+// back to grpc which propagates that to the resolver.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
+
+	var res interface{}
+	select {
+	case res = <-ccb.resultCh.Get():
+		ccb.resultCh.Load()
+	case <-ccb.closed.Done():
+		// Return early if the balancer wrapper is closed while we are waiting for
+		// the underlying balancer to process a ClientConnState update.
+		return nil
+	}
+	// If the returned error is nil, attempting to type assert it to error
+	// leads to a panic. So this needs to be handled separately.
+	if res == nil {
+		return nil
+	}
+	return res.(error)
+}
+
+// handleClientConnStateChange handles a ClientConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+//
+// If the addresses specified in the update contain addresses of type "grpclb"
+// and the selected LB policy is not "grpclb", these addresses will be filtered
+// out and ccs will be modified with the updated address list.
+func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
+	if ccb.curBalancerName != grpclbName {
+		// Filter any grpclb addresses since we don't have the grpclb balancer.
+		var addrs []resolver.Address
+		for _, addr := range ccs.ResolverState.Addresses {
+			if addr.Type == resolver.GRPCLB {
+				continue
+			}
+			addrs = append(addrs, addr)
+		}
+		ccs.ResolverState.Addresses = addrs
+	}
+	ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
+}
+
+// updateSubConnState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
+	// When updating addresses for a SubConn, if the address in use is not in
+	// the new addresses, the old ac will be torn down and a new ac will be
+	// created. tearDown() generates a state change with the Shutdown state, and
+	// we don't want the balancer to receive this state change. So before
+	// calling tearDown() on the old ac, ac.acbw (the acBalancerWrapper) is set
+	// to nil, and this function will be called with (nil, Shutdown). We don't
+	// need to call the balancer method in this case.
+	if sc == nil {
+		return
+	}
+	ccb.updateCh.Put(&scStateUpdate{
+		sc:    sc,
+		state: s,
+		err:   err,
+	})
+}
+
+// handleSubConnStateChange handles a SubConnState update from the update
+// channel and invokes the appropriate method on the underlying balancer.
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
+	ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
+}
+
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.updateCh.Put(&exitIdleUpdate{})
+}
+
+func (ccb *ccBalancerWrapper) handleExitIdle() {
+	if ccb.cc.GetState() != connectivity.Idle {
+		return
+	}
+	ccb.balancer.ExitIdle()
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	ccb.updateCh.Put(&resolverErrorUpdate{err: err})
+}
+
+func (ccb *ccBalancerWrapper) handleResolverError(err error) {
+	ccb.balancer.ResolverError(err)
+}
+
+// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
+// LB policy identified by name.
+//
+// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
+// first good update from the name resolver, it determines the LB policy to use
+// and invokes the switchTo() method. Upon receipt of every subsequent update
+// from the name resolver, it invokes this method.
+//
+// The ccBalancerWrapper keeps track of the current LB policy name, and skips
+// the graceful balancer switching process if the name does not change.
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+	ccb.updateCh.Put(&switchToUpdate{name: name})
+}
+
+// handleSwitchTo handles a balancer switch update from the update channel. It
+// calls the SwitchTo() method on the gracefulswitch.Balancer with a
+// balancer.Builder corresponding to name. If no balancer.Builder is registered
+// for the given name, it uses the default LB policy which is "pick_first".
+func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
+	// TODO: Other languages use case-insensitive balancer registries. We should
+	// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+	if strings.EqualFold(ccb.curBalancerName, name) {
+		return
+	}
+
+	// TODO: Ensure that name is a registered LB policy when we get here.
+	// We currently only validate the `loadBalancingConfig` field. We need to do
+	// the same for the `loadBalancingPolicy` field and reject the service config
+	// if the specified policy is not registered.
+	builder := balancer.Get(name)
+	if builder == nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+		builder = newPickfirstBuilder()
+	} else {
+		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+	}
+
+	if err := ccb.balancer.SwitchTo(builder); err != nil {
+		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+		return
+	}
+	ccb.curBalancerName = builder.Name()
+}
+
+// handleRemoveSubConn handles a request from the underlying balancer to remove
+// a SubConn.
+//
+// See comments in RemoveSubConn() for more details.
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
+	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	ccb.closed.Fire()
+	<-ccb.done.Done()
+}
+
+func (ccb *ccBalancerWrapper) handleClose() {
+	ccb.balancer.Close()
+	ccb.done.Fire()
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConn(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
+	acbw.ac.mu.Lock()
+	ac.acbw = acbw
+	acbw.ac.mu.Unlock()
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
+	// was required to handle the RemoveSubConn() method asynchronously by pushing
+	// the update onto the update channel. This was done to avoid a deadlock as
+	// switchBalancer() was holding cc.mu when calling Close() on the old
+	// balancer, which would in turn call RemoveSubConn().
+	//
+	// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
+	// asynchronously is probably not required anymore since the switchTo() method
+	// handles the balancer switch by pushing the update onto the channel.
+	// TODO(easwars): Handle this inline.
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	// Update the picker before updating the state.  Even though the ordering
+	// here does not matter for correctness, the reverse order can lead to
+	// multiple calls of Pick in the common start-up case where we wait for
+	// ready and then perform an RPC.  If the picker were updated later, we
+	// could call the "connecting" picker when the state is updated, and then
+	// call the "ready" picker after the picker gets updated.
+	ccb.cc.blockingpicker.updatePicker(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+	ccb.cc.resolveNow(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements the balancer.SubConn interface.
+type acBalancerWrapper struct {
+	mu        sync.Mutex
+	ac        *addrConn
+	producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	if len(addrs) == 0 {
+		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
+		return
+	}
+	if !acbw.ac.tryUpdateAddrs(addrs) {
+		cc := acbw.ac.cc
+		opts := acbw.ac.scopts
+		acbw.ac.mu.Lock()
+		// Set old ac.acbw to nil so the Shutdown state update will be ignored
+		// by balancer.
+		//
+		// TODO(bar) the state transition could be wrong when tearDown() old ac
+		// and creating new ac, fix the transition.
+		acbw.ac.acbw = nil
+		acbw.ac.mu.Unlock()
+		acState := acbw.ac.getState()
+		acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
+
+		if acState == connectivity.Shutdown {
+			return
+		}
+
+		newAC, err := cc.newAddrConn(addrs, opts)
+		if err != nil {
+			channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+			return
+		}
+		acbw.ac = newAC
+		newAC.mu.Lock()
+		newAC.acbw = acbw
+		newAC.mu.Unlock()
+		if acState != connectivity.Idle {
+			go newAC.connect()
+		}
+	}
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	return acbw.ac
+}
+
+var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
+
+// NewStream begins a streaming RPC on the addrConn.  If the addrConn is not
+// ready, returns errSubConnNotReady.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	transport := acbw.ac.getReadyTransport()
+	if transport == nil {
+		return nil, errSubConnNotReady
+	}
+	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC.  If the addrConn is not ready, returns
+// errSubConnNotReady.
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
+	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(args); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+	producer balancer.Producer
+	refs     int    // number of current refs to the producer
+	close    func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+
+	// Look up existing producer from this builder.
+	pData := acbw.producers[pb]
+	if pData == nil {
+		// Not found; create a new one and add it to the producers map.
+		p, close := pb.Build(acbw)
+		pData = &refCountedProducer{producer: p, close: close}
+		acbw.producers[pb] = pData
+	}
+	// Account for this new reference.
+	pData.refs++
+
+	// Return a cleanup function wrapped in a OnceFunc to remove this reference
+	// and delete the refCountedProducer from the map if the total reference
+	// count goes to zero.
+	unref := func() {
+		acbw.mu.Lock()
+		pData.refs--
+		if pData.refs == 0 {
+			defer pData.close() // Run outside the acbw mutex
+			delete(acbw.producers, pb)
+		}
+		acbw.mu.Unlock()
+	}
+	return pData.producer, grpcsync.OnceFunc(unref)
+}
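+
+// Illustrative sketch (not part of the upstream gRPC source): how a caller on
+// the balancer side would use the producer API. myProducerBuilder is an
+// assumed balancer.ProducerBuilder implementation.
+//
+//	p, stop := acbw.GetOrBuildProducer(myProducerBuilder{})
+//	defer stop() // drops this reference; the producer is closed once refs hit zero
+//	_ = p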
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
new file mode 100644
index 000000000..66d141fce
--- /dev/null
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -0,0 +1,1183 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.14.0
+// source: grpc/binlog/v1/binarylog.proto
+
+package grpc_binarylog_v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Enumerates the type of event.
+// Note that the terminology is different from the RPC semantics
+// definition, but the same meaning is expressed here.
+type GrpcLogEntry_EventType int32
+
+const (
+	GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
+	// Header sent from client to server
+	GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
+	// Header sent from server to client
+	GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
+	// Message sent from client to server
+	GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
+	// Message sent from server to client
+	GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
+	// A signal that client is done sending
+	GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
+	// Trailer indicates the end of the RPC.
+	// On client side, this event means a trailer was either received
+	// from the network or the gRPC library locally generated a status
+	// to inform the application about a failure.
+	// On server side, this event means the server application requested
+	// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
+	// this due to races on server side.
+	GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
+	// A signal that the RPC is cancelled. On client side, this
+	// indicates the client application requests a cancellation.
+	// On server side, this indicates that cancellation was detected.
+	// Note: This marks the end of the RPC. Events may arrive after
+	// this due to races. For example, on client side a trailer
+	// may arrive even though the application requested to cancel the RPC.
+	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
+)
+
+// Enum value maps for GrpcLogEntry_EventType.
+var (
+	GrpcLogEntry_EventType_name = map[int32]string{
+		0: "EVENT_TYPE_UNKNOWN",
+		1: "EVENT_TYPE_CLIENT_HEADER",
+		2: "EVENT_TYPE_SERVER_HEADER",
+		3: "EVENT_TYPE_CLIENT_MESSAGE",
+		4: "EVENT_TYPE_SERVER_MESSAGE",
+		5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+		6: "EVENT_TYPE_SERVER_TRAILER",
+		7: "EVENT_TYPE_CANCEL",
+	}
+	GrpcLogEntry_EventType_value = map[string]int32{
+		"EVENT_TYPE_UNKNOWN":           0,
+		"EVENT_TYPE_CLIENT_HEADER":     1,
+		"EVENT_TYPE_SERVER_HEADER":     2,
+		"EVENT_TYPE_CLIENT_MESSAGE":    3,
+		"EVENT_TYPE_SERVER_MESSAGE":    4,
+		"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+		"EVENT_TYPE_SERVER_TRAILER":    6,
+		"EVENT_TYPE_CANCEL":            7,
+	}
+)
+
+func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
+	p := new(GrpcLogEntry_EventType)
+	*p = x
+	return p
+}
+
+func (x GrpcLogEntry_EventType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
+}
+
+func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
+}
+
+func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
+func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Enumerates the entity that generates the log entry
+type GrpcLogEntry_Logger int32
+
+const (
+	GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
+	GrpcLogEntry_LOGGER_CLIENT  GrpcLogEntry_Logger = 1
+	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2
+)
+
+// Enum value maps for GrpcLogEntry_Logger.
+var (
+	GrpcLogEntry_Logger_name = map[int32]string{
+		0: "LOGGER_UNKNOWN",
+		1: "LOGGER_CLIENT",
+		2: "LOGGER_SERVER",
+	}
+	GrpcLogEntry_Logger_value = map[string]int32{
+		"LOGGER_UNKNOWN": 0,
+		"LOGGER_CLIENT":  1,
+		"LOGGER_SERVER":  2,
+	}
+)
+
+func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
+	p := new(GrpcLogEntry_Logger)
+	*p = x
+	return p
+}
+
+func (x GrpcLogEntry_Logger) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
+}
+
+func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
+func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type Address_Type int32
+
+const (
+	Address_TYPE_UNKNOWN Address_Type = 0
+	// address is in 1.2.3.4 form
+	Address_TYPE_IPV4 Address_Type = 1
+	// address is in IPv6 canonical form (RFC5952 section 4)
+	// The scope is NOT included in the address string.
+	Address_TYPE_IPV6 Address_Type = 2
+	// address is UDS string
+	Address_TYPE_UNIX Address_Type = 3
+)
+
+// Enum value maps for Address_Type.
+var (
+	Address_Type_name = map[int32]string{
+		0: "TYPE_UNKNOWN",
+		1: "TYPE_IPV4",
+		2: "TYPE_IPV6",
+		3: "TYPE_UNIX",
+	}
+	Address_Type_value = map[string]int32{
+		"TYPE_UNKNOWN": 0,
+		"TYPE_IPV4":    1,
+		"TYPE_IPV6":    2,
+		"TYPE_UNIX":    3,
+	}
+)
+
+func (x Address_Type) Enum() *Address_Type {
+	p := new(Address_Type)
+	*p = x
+	return p
+}
+
+func (x Address_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
+}
+
+func (Address_Type) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
+}
+
+func (x Address_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Address_Type.Descriptor instead.
+func (Address_Type) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
+}
+
+// Log entry we store in binary logs
+type GrpcLogEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The timestamp of the binary log message
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Uniquely identifies a call. The value must not be 0 in order to
+	// disambiguate from an unset value.
+	// Each call may have several log entries; they will all have the same call_id.
+	// Nothing is guaranteed about their value other than that they are unique
+	// across different RPCs in the same gRPC process.
+	CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
+	// The entry sequence id for this call. The first GrpcLogEntry has a
+	// value of 1, to disambiguate from an unset value. The purpose of
+	// this field is to detect missing entries in environments where
+	// durability or ordering is not guaranteed.
+	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
+	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
+	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum
+	// The logger uses one of the following fields to record the payload,
+	// according to the type of the log entry.
+	//
+	// Types that are assignable to Payload:
+	//
+	//	*GrpcLogEntry_ClientHeader
+	//	*GrpcLogEntry_ServerHeader
+	//	*GrpcLogEntry_Message
+	//	*GrpcLogEntry_Trailer
+	Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
+	// true if payload does not represent the full message or metadata.
+	PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
+	// Peer address information; it will only be recorded on the first
+	// incoming event. On the client side, the peer is logged on
+	// EVENT_TYPE_SERVER_HEADER normally, or on EVENT_TYPE_SERVER_TRAILER in
+	// the case of trailers-only. On the server side, the peer is always
+	// logged on EVENT_TYPE_CLIENT_HEADER.
+	Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+}
+
+func (x *GrpcLogEntry) Reset() {
+	*x = GrpcLogEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GrpcLogEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcLogEntry) ProtoMessage() {}
+
+func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service>/<method>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port>.
+	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+	// the RPC timeout
+	Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *ClientHeader) Reset() {
+	*x = ClientHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ClientHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
+func (*ClientHeader) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClientHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *ClientHeader) GetMethodName() string {
+	if x != nil {
+		return x.MethodName
+	}
+	return ""
+}
+
+func (x *ClientHeader) GetAuthority() string {
+	if x != nil {
+		return x.Authority
+	}
+	return ""
+}
+
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+	if x != nil {
+		return x.Timeout
+	}
+	return nil
+}
+
+type ServerHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+}
+
+func (x *ServerHeader) Reset() {
+	*x = ServerHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ServerHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
+func (*ServerHeader) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ServerHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+type Trailer struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The gRPC status code.
+	StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
+	// An original status message before any transport specific
+	// encoding.
+	StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
+	// The value of the 'grpc-status-details-bin' metadata key. If
+	// present, this is always an encoded 'google.rpc.Status' message.
+	StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+}
+
+func (x *Trailer) Reset() {
+	*x = Trailer{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Trailer) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Trailer) ProtoMessage() {}
+
+func (x *Trailer) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
+func (*Trailer) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Trailer) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *Trailer) GetStatusCode() uint32 {
+	if x != nil {
+		return x.StatusCode
+	}
+	return 0
+}
+
+func (x *Trailer) GetStatusMessage() string {
+	if x != nil {
+		return x.StatusMessage
+	}
+	return ""
+}
+
+func (x *Trailer) GetStatusDetails() []byte {
+	if x != nil {
+		return x.StatusDetails
+	}
+	return nil
+}
+
+// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
+type Message struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Length of the message. It may not be the same as the length of the
+	// data field, as the logging payload can be truncated or omitted.
+	Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
+	// May be truncated or omitted.
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *Message) Reset() {
+	*x = Message{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Message) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Message) GetLength() uint32 {
+	if x != nil {
+		return x.Length
+	}
+	return 0
+}
+
+func (x *Message) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+// A list of metadata pairs, used in the payload of client header,
+// server header, and server trailer.
+// Implementations may omit some entries to honor the header limits
+// of GRPC_BINARY_LOG_CONFIG.
+//
+// Header keys added by gRPC are omitted. To be more specific,
+// implementations will not log the following entries, and this is
+// not to be treated as a truncation:
+//   - entries handled by grpc that are not user visible, such as those
+//     that begin with 'grpc-' (with exception of grpc-trace-bin)
+//     or keys like 'lb-token'
+//   - transport specific entries, including but not limited to:
+//     ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
+//   - entries added for call credentials
+//
+// Implementations must always log grpc-trace-bin if it is present.
+// Practically speaking, it will only be visible on the server side, because
+// grpc-trace-bin is managed by low-level client-side mechanisms
+// inaccessible from the application level. On the server side, the
+// header is just a normal metadata key.
+// The pair will not count towards the size limit.
+type Metadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *Metadata) Reset() {
+	*x = Metadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Metadata) GetEntry() []*MetadataEntry {
+	if x != nil {
+		return x.Entry
+	}
+	return nil
+}
+
+// A metadata key value pair
+type MetadataEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MetadataEntry) Reset() {
+	*x = MetadataEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetadataEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataEntry) ProtoMessage() {}
+
+func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
+func (*MetadataEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *MetadataEntry) GetKey() string {
+	if x != nil {
+		return x.Key
+	}
+	return ""
+}
+
+func (x *MetadataEntry) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Address information
+type Address struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type    Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+	Address string       `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+	// only for TYPE_IPV4 and TYPE_IPV6
+	IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+}
+
+func (x *Address) Reset() {
+	*x = Address{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Address) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
+func (*Address) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Address) GetType() Address_Type {
+	if x != nil {
+		return x.Type
+	}
+	return Address_TYPE_UNKNOWN
+}
+
+func (x *Address) GetAddress() string {
+	if x != nil {
+		return x.Address
+	}
+	return ""
+}
+
+func (x *Address) GetIpPort() uint32 {
+	if x != nil {
+		return x.IpPort
+	}
+	return 0
+}
+
+var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
+
+var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
+	0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+	0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+	0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
+	0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
+	0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
+	0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
+	0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+	0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
+	0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
+	0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
+	0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+	0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
+	0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
+	0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+	0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+	0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
+	0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
+	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
+	0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
+	0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
+	0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+	0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
+	0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
+	0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
+	0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
+	0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
+	0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
+	0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
+	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+	0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
+	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
+	0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
+	0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
+	0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
+	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
+	0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
+	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
+	0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
+	0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+	0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
+	0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+	0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
+	0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+	0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
+	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
+	0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
+	0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+	0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
+	0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+	0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
+	0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+	0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
+	0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
+	0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
+	0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
+	0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
+	0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+	0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+	0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
+	0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+	0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
+	0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
+	0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
+	0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
+	0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
+	0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
+	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+	0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
+	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
+	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
+	0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
+	0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
+	0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
+	file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
+)
+
+func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
+		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
+	})
+	return file_grpc_binlog_v1_binarylog_proto_rawDescData
+}
+
+var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+	(GrpcLogEntry_EventType)(0),   // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
+	(GrpcLogEntry_Logger)(0),      // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
+	(Address_Type)(0),             // 2: grpc.binarylog.v1.Address.Type
+	(*GrpcLogEntry)(nil),          // 3: grpc.binarylog.v1.GrpcLogEntry
+	(*ClientHeader)(nil),          // 4: grpc.binarylog.v1.ClientHeader
+	(*ServerHeader)(nil),          // 5: grpc.binarylog.v1.ServerHeader
+	(*Trailer)(nil),               // 6: grpc.binarylog.v1.Trailer
+	(*Message)(nil),               // 7: grpc.binarylog.v1.Message
+	(*Metadata)(nil),              // 8: grpc.binarylog.v1.Metadata
+	(*MetadataEntry)(nil),         // 9: grpc.binarylog.v1.MetadataEntry
+	(*Address)(nil),               // 10: grpc.binarylog.v1.Address
+	(*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+	(*durationpb.Duration)(nil),   // 12: google.protobuf.Duration
+}
+var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
+	11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
+	0,  // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
+	1,  // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
+	4,  // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
+	5,  // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
+	7,  // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
+	6,  // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
+	10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
+	8,  // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
+	8,  // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	8,  // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
+	9,  // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
+	2,  // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
+	14, // [14:14] is the sub-list for method output_type
+	14, // [14:14] is the sub-list for method input_type
+	14, // [14:14] is the sub-list for extension type_name
+	14, // [14:14] is the sub-list for extension extendee
+	0,  // [0:14] is the sub-list for field type_name
+}
+
+func init() { file_grpc_binlog_v1_binarylog_proto_init() }
+func file_grpc_binlog_v1_binarylog_proto_init() {
+	if File_grpc_binlog_v1_binarylog_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*GrpcLogEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ClientHeader); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ServerHeader); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Trailer); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Message); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetadataEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Address); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+		(*GrpcLogEntry_ClientHeader)(nil),
+		(*GrpcLogEntry_ServerHeader)(nil),
+		(*GrpcLogEntry_Message)(nil),
+		(*GrpcLogEntry_Trailer)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
+			NumEnums:      3,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_grpc_binlog_v1_binarylog_proto_goTypes,
+		DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
+		EnumInfos:         file_grpc_binlog_v1_binarylog_proto_enumTypes,
+		MessageInfos:      file_grpc_binlog_v1_binarylog_proto_msgTypes,
+	}.Build()
+	File_grpc_binlog_v1_binarylog_proto = out.File
+	file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
+	file_grpc_binlog_v1_binarylog_proto_goTypes = nil
+	file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
new file mode 100644
index 000000000..9e20e4d38
--- /dev/null
+++ b/vendor/google.golang.org/grpc/call.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+)
+
+// Invoke sends the RPC request on the wire and returns after the response
+// is received. This is typically called by generated code.
+//
+// All errors returned by Invoke are compatible with the status package.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+	// allow interceptor to see all applicable call options, which means those
+	// configured as defaults from dial option as well as per-call options
+	opts = combine(cc.dopts.callOptions, opts)
+
+	if cc.dopts.unaryInt != nil {
+		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
+	}
+	return invoke(ctx, method, args, reply, cc, opts...)
+}
+
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+	// we don't use append because o1 could have extra capacity whose
+	// elements would be overwritten, which could cause inadvertent
+	// sharing (and race conditions) between concurrent calls
+	if len(o1) == 0 {
+		return o2
+	} else if len(o2) == 0 {
+		return o1
+	}
+	ret := make([]CallOption, len(o1)+len(o2))
+	copy(ret, o1)
+	copy(ret[len(o1):], o2)
+	return ret
+}
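+
+// Editorial note: a minimal sketch (not part of upstream gRPC) of the
+// aliasing hazard that combine avoids by copying into a fresh slice. If
+// append were used and o1 had spare capacity, two calls could write into
+// the same backing array:
+//
+//	base := make([]CallOption, 0, 4)       // spare capacity
+//	a := append(base, WaitForReady(true))  // writes base's array at index 0
+//	b := append(base, WaitForReady(false)) // overwrites the element a sees
+//	_ = b                                  // now a[0] reports false as well
+//
+// Copying into a freshly allocated slice keeps the two results independent.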
+
+// Invoke sends the RPC request on the wire and returns after the response
+// is received. This is typically called by generated code.
+//
+// Deprecated: Use ClientConn.Invoke instead.
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return cc.Invoke(ctx, method, args, reply, opts...)
+}
+
+var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
+
+func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(req); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
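+
+// Editorial note: an illustrative sketch (not upstream code) of how
+// generated stubs typically call Invoke; the service, method name, and
+// message types below are hypothetical placeholders:
+//
+//	func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) {
+//		out := new(HelloReply)
+//		if err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...); err != nil {
+//			return nil, err
+//		}
+//		return out, nil
+//	}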
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 000000000..32b7fa579
--- /dev/null
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md is provided
+// by the `internal/channelz` package.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
new file mode 100644
index 000000000..d607d4e9e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -0,0 +1,1647 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+	"google.golang.org/grpc/status"
+
+	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
+	_ "google.golang.org/grpc/internal/resolver/dns"         // To register dns resolver.
+	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+	_ "google.golang.org/grpc/internal/resolver/unix"        // To register unix resolver.
+)
+
+const (
+	// minimum time to give a connection to complete
+	minConnectTimeout = 20 * time.Second
+	// must match grpclbName in grpclb/grpclb.go
+	grpclbName = "grpclb"
+)
+
+var (
+	// ErrClientConnClosing indicates that the operation is illegal because
+	// the ClientConn is closing.
+	//
+	// Deprecated: this error should not be relied upon by users; use the status
+	// code of Canceled instead.
+	ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
+	// errConnDrain indicates that the connection is being drained and does not accept any new RPCs.
+	errConnDrain = errors.New("grpc: the connection is drained")
+	// errConnClosing indicates that the connection is closing.
+	errConnClosing = errors.New("grpc: the connection is closing")
+	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
+	// service config.
+	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+)
+
+// The following errors are returned from Dial and DialContext
+var (
+	// errNoTransportSecurity indicates that there is no transport security
+	// being set for ClientConn. Users should either set one or explicitly
+	// call WithInsecure DialOption to disable security.
+	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
+	// errTransportCredsAndBundle indicates that creds bundle is used together
+	// with other individual Transport Credentials.
+	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
+	// errNoTransportCredsInBundle indicates that the configured creds bundle
+	// returned nil transport credentials.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that the user wants to
+	// transmit security information (e.g., an OAuth2 token), which requires
+	// a secure connection, over an insecure connection.
+	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+)
+
+const (
+	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultClientMaxSendMessageSize    = math.MaxInt32
+	// defaultWriteBufSize specifies the buffer size for sending frames.
+	defaultWriteBufSize = 32 * 1024
+	defaultReadBufSize  = 32 * 1024
+)
+
+// Dial creates a client connection to the given target.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	return DialContext(context.Background(), target, opts...)
+}
+
+type defaultConfigSelector struct {
+	sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+	return &iresolver.RPCConfig{
+		Context:      rpcInfo.Context,
+		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+	}, nil
+}
+
+// DialContext creates a client connection to the given target. By default,
+// it's a non-blocking dial (the function won't wait for connections to be
+// established, and connecting happens in the background). To make it a
+// blocking dial, use the WithBlock() dial option.
+//
+// In the non-blocking case, the ctx does not affect the connection. It
+// only controls the setup steps.
+//
+// In the blocking case, ctx can be used to cancel or expire the pending
+// connection. Once this function returns, the cancellation and expiration
+// of ctx will be a no-op. Users should call ClientConn.Close to terminate
+// all the pending operations after this function returns.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// e.g. to use the dns resolver, a "dns:///" prefix should be applied to
+// the target.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+	cc := &ClientConn{
+		target:            target,
+		csMgr:             &connectivityStateManager{},
+		conns:             make(map[*addrConn]struct{}),
+		dopts:             defaultDialOptions(),
+		blockingpicker:    newPickerWrapper(),
+		czData:            new(channelzData),
+		firstResolveEvent: grpcsync.NewEvent(),
+	}
+	cc.retryThrottler.Store((*retryThrottler)(nil))
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.ctx, cc.cancel = context.WithCancel(context.Background())
+
+	for _, opt := range extraDialOptions {
+		opt.apply(&cc.dopts)
+	}
+
+	for _, opt := range opts {
+		opt.apply(&cc.dopts)
+	}
+
+	chainUnaryClientInterceptors(cc)
+	chainStreamClientInterceptors(cc)
+
+	defer func() {
+		if err != nil {
+			cc.Close()
+		}
+	}()
+
+	pid := cc.dopts.channelzParentID
+	cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
+	ted := &channelz.TraceEventDesc{
+		Desc:     "Channel created",
+		Severity: channelz.CtInfo,
+	}
+	if cc.dopts.channelzParentID != nil {
+		ted.Parent = &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		}
+	}
+	channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
+	cc.csMgr.channelzID = cc.channelzID
+
+	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+		return nil, errNoTransportSecurity
+	}
+	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+		return nil, errTransportCredsAndBundle
+	}
+	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+		return nil, errNoTransportCredsInBundle
+	}
+	transportCreds := cc.dopts.copts.TransportCredentials
+	if transportCreds == nil {
+		transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+	}
+	if transportCreds.Info().SecurityProtocol == "insecure" {
+		for _, cd := range cc.dopts.copts.PerRPCCredentials {
+			if cd.RequireTransportSecurity() {
+				return nil, errTransportCredentialsMissing
+			}
+		}
+	}
+
+	if cc.dopts.defaultServiceConfigRawJSON != nil {
+		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+		if scpr.Err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+		}
+		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+	}
+	cc.mkp = cc.dopts.copts.KeepaliveParams
+
+	if cc.dopts.copts.UserAgent != "" {
+		cc.dopts.copts.UserAgent += " " + grpcUA
+	} else {
+		cc.dopts.copts.UserAgent = grpcUA
+	}
+
+	if cc.dopts.timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
+		defer cancel()
+	}
+	defer func() {
+		select {
+		case <-ctx.Done():
+			switch {
+			case ctx.Err() == err:
+				conn = nil
+			case err == nil || !cc.dopts.returnLastError:
+				conn, err = nil, ctx.Err()
+			default:
+				conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+			}
+		default:
+		}
+	}()
+
+	scSet := false
+	if cc.dopts.scChan != nil {
+		// Try to get an initial service config.
+		select {
+		case sc, ok := <-cc.dopts.scChan:
+			if ok {
+				cc.sc = &sc
+				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
+				scSet = true
+			}
+		default:
+		}
+	}
+	if cc.dopts.bs == nil {
+		cc.dopts.bs = backoff.DefaultExponential
+	}
+
+	// Determine the resolver to use.
+	resolverBuilder, err := cc.parseTargetAndFindResolver()
+	if err != nil {
+		return nil, err
+	}
+	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
+	if err != nil {
+		return nil, err
+	}
+	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
+
+	if cc.dopts.scChan != nil && !scSet {
+		// Blocking wait for the initial service config.
+		select {
+		case sc, ok := <-cc.dopts.scChan:
+			if ok {
+				cc.sc = &sc
+				cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
+			}
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+	if cc.dopts.scChan != nil {
+		go cc.scWatcher()
+	}
+
+	var credsClone credentials.TransportCredentials
+	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+		credsClone = creds.Clone()
+	}
+	cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
+		DialCreds:        credsClone,
+		CredsBundle:      cc.dopts.copts.CredsBundle,
+		Dialer:           cc.dopts.copts.Dialer,
+		Authority:        cc.authority,
+		CustomUserAgent:  cc.dopts.copts.UserAgent,
+		ChannelzParentID: cc.channelzID,
+		Target:           cc.parsedTarget,
+	})
+
+	// Build the resolver.
+	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build resolver: %v", err)
+	}
+	cc.mu.Lock()
+	cc.resolverWrapper = rWrapper
+	cc.mu.Unlock()
+
+	// A blocking dial blocks until the clientConn is ready.
+	if cc.dopts.block {
+		for {
+			cc.Connect()
+			s := cc.GetState()
+			if s == connectivity.Ready {
+				break
+			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+				if err = cc.connectionError(); err != nil {
+					terr, ok := err.(interface {
+						Temporary() bool
+					})
+					if ok && !terr.Temporary() {
+						return nil, err
+					}
+				}
+			}
+			if !cc.WaitForStateChange(ctx, s) {
+				// ctx got timeout or canceled.
+				if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+					return nil, err
+				}
+				return nil, ctx.Err()
+			}
+		}
+	}
+
+	return cc, nil
+}
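+
+// Editorial note: a minimal usage sketch (not upstream code). The target is
+// a placeholder, and insecure.NewCredentials comes from
+// google.golang.org/grpc/credentials/insecure:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	conn, err := grpc.DialContext(ctx, "dns:///greeter.example:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		grpc.WithBlock(), // blocking dial, so ctx bounds connection setup
+//	)
+//	if err != nil {
+//		log.Fatalf("dial failed: %v", err)
+//	}
+//	defer conn.Close()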
+
+// chainUnaryClientInterceptors chains all unary client interceptors into one.
+func chainUnaryClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainUnaryInts
+	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.unaryInt != nil {
+		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
+	}
+	var chainedInt UnaryClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
+		}
+	}
+	cc.dopts.unaryInt = chainedInt
+}
+
+// getChainUnaryInvoker recursively generates the chained unary invoker.
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
+	if curr == len(interceptors)-1 {
+		return finalInvoker
+	}
+	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
+	}
+}
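+
+// Editorial note: an illustrative sketch (not upstream code) of the
+// resulting execution order. With WithUnaryInterceptor(A) and
+// WithChainUnaryInterceptor(B, C), the effective chain is A -> B -> C ->
+// invoke, because each interceptor receives the next stage as its invoker:
+//
+//	logging := func(ctx context.Context, method string, req, reply interface{},
+//		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+//		log.Printf("-> %s", method)
+//		err := invoker(ctx, method, req, reply, cc, opts...)
+//		log.Printf("<- %s err=%v", method, err)
+//		return err
+//	}
+//	conn, err := grpc.Dial(target, grpc.WithChainUnaryInterceptor(logging))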
+
+// chainStreamClientInterceptors chains all stream client interceptors into one.
+func chainStreamClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainStreamInts
+	// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.streamInt != nil {
+		interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
+	}
+	var chainedInt StreamClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
+			return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
+		}
+	}
+	cc.dopts.streamInt = chainedInt
+}
+
+// getChainStreamer recursively generates the chained client stream constructor.
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
+	if curr == len(interceptors)-1 {
+		return finalStreamer
+	}
+	return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+		return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
+	}
+}
+
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+type connectivityStateManager struct {
+	mu         sync.Mutex
+	state      connectivity.State
+	notifyChan chan struct{}
+	channelzID *channelz.Identifier
+}
+
+// updateState updates the connectivity.State of ClientConn.
+// If there's a change, it notifies the goroutines waiting for the state
+// change to happen.
+func (csm *connectivityStateManager) updateState(state connectivity.State) {
+	csm.mu.Lock()
+	defer csm.mu.Unlock()
+	if csm.state == connectivity.Shutdown {
+		return
+	}
+	if csm.state == state {
+		return
+	}
+	csm.state = state
+	channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
+	if csm.notifyChan != nil {
+		// There are other goroutines waiting on this channel.
+		close(csm.notifyChan)
+		csm.notifyChan = nil
+	}
+}
+
+func (csm *connectivityStateManager) getState() connectivity.State {
+	csm.mu.Lock()
+	defer csm.mu.Unlock()
+	return csm.state
+}
+
+func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
+	csm.mu.Lock()
+	defer csm.mu.Unlock()
+	if csm.notifyChan == nil {
+		csm.notifyChan = make(chan struct{})
+	}
+	return csm.notifyChan
+}
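+
+// Editorial note: getNotifyChan and updateState together implement the
+// classic close-to-broadcast idiom: one channel is lazily created per
+// state, and closing it wakes every waiter at once (a receive on a closed
+// channel never blocks). A minimal sketch of a waiter (illustrative, not
+// upstream code):
+//
+//	ch := csm.getNotifyChan() // snapshot the channel for the current state
+//	select {
+//	case <-ch: // updateState closed it; the state has changed
+//	case <-ctx.Done():
+//	}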
+
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs.  It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+	// Invoke performs a unary RPC and returns after the response is received
+	// into reply.
+	Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
+	// NewStream begins a streaming RPC.
+	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
+// ClientConn represents a virtual connection to a conceptual endpoint, to
+// perform RPCs.
+//
+// A ClientConn is free to have zero or more actual connections to the
+// endpoint based on configuration, load, etc. It is also free to determine
+// which actual endpoints to use and may change them on every RPC,
+// permitting client-side load balancing.
+//
+// A ClientConn encapsulates a range of functionality including name
+// resolution, TCP connection establishment (with retries and backoff) and TLS
+// handshakes. It also handles errors on established connections by
+// re-resolving the name and reconnecting.
+type ClientConn struct {
+	ctx    context.Context    // Initialized using the background context at dial time.
+	cancel context.CancelFunc // Cancelled on close.
+
+	// The following are initialized at dial time, and are read-only after that.
+	target          string               // User's dial target.
+	parsedTarget    resolver.Target      // See parseTargetAndFindResolver().
+	authority       string               // See determineAuthority().
+	dopts           dialOptions          // Default and user specified dial options.
+	channelzID      *channelz.Identifier // Channelz identifier for the channel.
+	balancerWrapper *ccBalancerWrapper   // Uses gracefulswitch.balancer underneath.
+
+	// The following provide their own synchronization, and therefore don't
+	// require cc.mu to be held to access them.
+	csMgr              *connectivityStateManager
+	blockingpicker     *pickerWrapper
+	safeConfigSelector iresolver.SafeConfigSelector
+	czData             *channelzData
+	retryThrottler     atomic.Value // Updated from service config.
+
+	// firstResolveEvent is used to track whether the name resolver sent us at
+	// least one update. RPCs block on this event.
+	firstResolveEvent *grpcsync.Event
+
+	// mu protects the following fields.
+	// TODO: split mu so the same mutex isn't used for everything.
+	mu              sync.RWMutex
+	resolverWrapper *ccResolverWrapper         // Initialized in Dial; cleared in Close.
+	sc              *ServiceConfig             // Latest service config received from the resolver.
+	conns           map[*addrConn]struct{}     // Set to nil on close.
+	mkp             keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+
+	lceMu               sync.Mutex // protects lastConnectionError
+	lastConnectionError error
+}
+
+// WaitForStateChange waits until the connectivity.State of ClientConn
+// changes from sourceState or ctx expires. A true value is returned in the
+// former case and false in the latter.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+	ch := cc.csMgr.getNotifyChan()
+	if cc.csMgr.getState() != sourceState {
+		return true
+	}
+	select {
+	case <-ctx.Done():
+		return false
+	case <-ch:
+		return true
+	}
+}
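+
+// Editorial note: an illustrative sketch (not upstream code) of waiting for
+// a non-blocking dial to become Ready, mirroring the loop DialContext runs
+// when WithBlock is used:
+//
+//	for {
+//		s := conn.GetState()
+//		if s == connectivity.Ready {
+//			break
+//		}
+//		conn.Connect() // kick idle subchannels into connecting
+//		if !conn.WaitForStateChange(ctx, s) {
+//			return ctx.Err() // ctx expired before the state changed
+//		}
+//	}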
+
+// GetState returns the connectivity.State of ClientConn.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) GetState() connectivity.State {
+	return cc.csMgr.getState()
+}
+
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle.  Does not wait for the connection attempts to begin
+// before returning.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+	cc.balancerWrapper.exitIdle()
+}
+
+func (cc *ClientConn) scWatcher() {
+	for {
+		select {
+		case sc, ok := <-cc.dopts.scChan:
+			if !ok {
+				return
+			}
+			cc.mu.Lock()
+			// TODO: load balance policy runtime change is ignored.
+			// We may revisit this decision in the future.
+			cc.sc = &sc
+			cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
+			cc.mu.Unlock()
+		case <-cc.ctx.Done():
+			return
+		}
+	}
+}
+
+// waitForResolvedAddrs blocks until the resolver has provided addresses or the
+// context expires.  Returns nil unless the context expires first; otherwise
+// returns a status error based on the context.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+	// This is on the RPC path, so we use a fast path to avoid the
+	// more-expensive "select" below after the resolver has returned once.
+	if cc.firstResolveEvent.HasFired() {
+		return nil
+	}
+	select {
+	case <-cc.firstResolveEvent.Done():
+		return nil
+	case <-ctx.Done():
+		return status.FromContextError(ctx.Err()).Err()
+	case <-cc.ctx.Done():
+		return ErrClientConnClosing
+	}
+}
+
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+	cfg := parseServiceConfig("{}")
+	if cfg.Err != nil {
+		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+	}
+	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+	if cc.sc != nil {
+		cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs)
+		return
+	}
+	if cc.dopts.defaultServiceConfig != nil {
+		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs)
+	} else {
+		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs)
+	}
+}
+
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+	defer cc.firstResolveEvent.Fire()
+	cc.mu.Lock()
+	// Check if the ClientConn is already closed. Some fields (e.g.
+	// balancerWrapper) are set to nil when closing the ClientConn, and could
+	// cause a nil pointer panic if we don't have this check.
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return nil
+	}
+
+	if err != nil {
+		// May need to apply the initial service config in case the resolver
+		// doesn't support service configs, or doesn't provide a service config
+		// with the new addresses.
+		cc.maybeApplyDefaultServiceConfig(nil)
+
+		cc.balancerWrapper.resolverError(err)
+
+		// No addresses are valid with err set; return early.
+		cc.mu.Unlock()
+		return balancer.ErrBadResolverState
+	}
+
+	var ret error
+	if cc.dopts.disableServiceConfig {
+		channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+	} else if s.ServiceConfig == nil {
+		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+		// TODO: do we need to apply a failing LB policy if there is no
+		// default, per the error handling design?
+	} else {
+		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+			configSelector := iresolver.GetConfigSelector(s)
+			if configSelector != nil {
+				if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
+					channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector")
+				}
+			} else {
+				configSelector = &defaultConfigSelector{sc}
+			}
+			cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
+		} else {
+			ret = balancer.ErrBadResolverState
+			if cc.sc == nil {
+				// Apply the failing LB only if we haven't received valid service config
+				// from the name resolver in the past.
+				cc.applyFailingLB(s.ServiceConfig)
+				cc.mu.Unlock()
+				return ret
+			}
+		}
+	}
+
+	var balCfg serviceconfig.LoadBalancingConfig
+	if cc.sc != nil && cc.sc.lbConfig != nil {
+		balCfg = cc.sc.lbConfig.cfg
+	}
+	bw := cc.balancerWrapper
+	cc.mu.Unlock()
+
+	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+	if ret == nil {
+		ret = uccsErr // prefer ErrBadResolverState since any other error is
+		// currently meaningless to the caller.
+	}
+	return ret
+}
+
+// applyFailingLB is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+//
+// Caller must hold cc.mu.
+func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
+	var err error
+	if sc.Err != nil {
+		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
+	} else {
+		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
+	}
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+	cc.csMgr.updateState(connectivity.TransientFailure)
+}
+
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+	cc.balancerWrapper.updateSubConnState(sc, s, err)
+}
+
+// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+	ac := &addrConn{
+		state:        connectivity.Idle,
+		cc:           cc,
+		addrs:        addrs,
+		scopts:       opts,
+		dopts:        cc.dopts,
+		czData:       new(channelzData),
+		resetBackoff: make(chan struct{}),
+	}
+	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+	// Track ac in cc. This needs to be done before any getTransport(...) is called.
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	if cc.conns == nil {
+		return nil, ErrClientConnClosing
+	}
+
+	var err error
+	ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+	if err != nil {
+		return nil, err
+	}
+	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+		Desc:     "Subchannel created",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		},
+	})
+
+	cc.conns[ac] = struct{}{}
+	return ac, nil
+}
+
+// removeAddrConn removes the addrConn in the subConn from the clientConn.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return
+	}
+	delete(cc.conns, ac)
+	cc.mu.Unlock()
+	ac.tearDown(err)
+}
+
+func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
+	return &channelz.ChannelInternalMetric{
+		State:                    cc.GetState(),
+		Target:                   cc.target,
+		CallsStarted:             atomic.LoadInt64(&cc.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&cc.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&cc.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
+	}
+}
+
+// Target returns the target string of the ClientConn.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func (cc *ClientConn) Target() string {
+	return cc.target
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+	atomic.AddInt64(&cc.czData.callsStarted, 1)
+	atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+	atomic.AddInt64(&cc.czData.callsSucceeded, 1)
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+	atomic.AddInt64(&cc.czData.callsFailed, 1)
+}
+
+// connect starts creating a transport.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+func (ac *addrConn) connect() error {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		if logger.V(2) {
+			logger.Infof("connect called on shutdown addrConn; ignoring.")
+		}
+		ac.mu.Unlock()
+		return errConnClosing
+	}
+	if ac.state != connectivity.Idle {
+		if logger.V(2) {
+			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+		}
+		ac.mu.Unlock()
+		return nil
+	}
+	// Update connectivity state within the lock to prevent subsequent or
+	// concurrent calls from resetting the transport more than once.
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	ac.resetTransport()
+	return nil
+}
+
+func equalAddresses(a, b []resolver.Address) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if !v.Equal(b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
+//
+// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
+// addresses will be picked up by retry in the next iteration after backoff.
+//
+// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
+//
+// If the address list is the same as the old one, it does nothing and
+// returns true.
+//
+// If ac is Connecting, it returns false. The caller should tear down the ac and
+// create a new one. Note that the backoff will be reset when this happens.
+//
+// If ac is Ready, it checks whether current connected address of ac is in the
+// new addrs list.
+//   - If true, it updates ac.addrs and returns true. The ac will keep using
+//     the existing connection.
+//   - If false, it does nothing and returns false.
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	if ac.state == connectivity.Shutdown ||
+		ac.state == connectivity.TransientFailure ||
+		ac.state == connectivity.Idle {
+		ac.addrs = addrs
+		return true
+	}
+
+	if equalAddresses(ac.addrs, addrs) {
+		return true
+	}
+
+	if ac.state == connectivity.Connecting {
+		return false
+	}
+
+	// ac.state is Ready, try to find the connected address.
+	var curAddrFound bool
+	for _, a := range addrs {
+		a.ServerName = ac.cc.getServerName(a)
+		if reflect.DeepEqual(ac.curAddr, a) {
+			curAddrFound = true
+			break
+		}
+	}
+	channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+	if curAddrFound {
+		ac.addrs = addrs
+	}
+
+	return curAddrFound
+}
+
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+	if cc.dopts.authority != "" {
+		return cc.dopts.authority
+	}
+	if addr.ServerName != "" {
+		return addr.ServerName
+	}
+	return cc.authority
+}
+
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+	if sc == nil {
+		return MethodConfig{}
+	}
+	if m, ok := sc.Methods[method]; ok {
+		return m
+	}
+	i := strings.LastIndex(method, "/")
+	if m, ok := sc.Methods[method[:i+1]]; ok {
+		return m
+	}
+	return sc.Methods[""]
+}
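+
+// Editorial note: a worked example (illustrative, not upstream code) of the
+// lookup order above for the method "/helloworld.Greeter/SayHello":
+//
+//	sc.Methods["/helloworld.Greeter/SayHello"] // 1. exact method match
+//	sc.Methods["/helloworld.Greeter/"]         // 2. service-wide default
+//	sc.Methods[""]                             // 3. global default
+//
+// The service-wide key is derived by slicing the method name up to and
+// including its last '/'.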
+
+// GetMethodConfig gets the method config of the input method.
+// If there's an exact match for input method (i.e. /service/method), we return
+// the corresponding MethodConfig.
+// If there isn't an exact match for the input method, we look for the
+// service's default config under the service (i.e. /service/) and then for
+// the default for all services (empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
+// Otherwise, we return an empty MethodConfig.
+func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
+	// TODO: Avoid the locking here.
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	return getMethodConfig(cc.sc, method)
+}
+
+func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	if cc.sc == nil {
+		return nil
+	}
+	return cc.sc.healthCheckConfig
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
+	return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+		Ctx:            ctx,
+		FullMethodName: method,
+	})
+}
+
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
+	if sc == nil {
+		// should never reach here.
+		return
+	}
+	cc.sc = sc
+	if configSelector != nil {
+		cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+	}
+
+	if cc.sc.retryThrottling != nil {
+		newThrottler := &retryThrottler{
+			tokens: cc.sc.retryThrottling.MaxTokens,
+			max:    cc.sc.retryThrottling.MaxTokens,
+			thresh: cc.sc.retryThrottling.MaxTokens / 2,
+			ratio:  cc.sc.retryThrottling.TokenRatio,
+		}
+		cc.retryThrottler.Store(newThrottler)
+	} else {
+		cc.retryThrottler.Store((*retryThrottler)(nil))
+	}
+
+	var newBalancerName string
+	if cc.sc != nil && cc.sc.lbConfig != nil {
+		newBalancerName = cc.sc.lbConfig.name
+	} else {
+		var isGRPCLB bool
+		for _, a := range addrs {
+			if a.Type == resolver.GRPCLB {
+				isGRPCLB = true
+				break
+			}
+		}
+		if isGRPCLB {
+			newBalancerName = grpclbName
+		} else if cc.sc != nil && cc.sc.LB != nil {
+			newBalancerName = *cc.sc.LB
+		} else {
+			newBalancerName = PickFirstBalancerName
+		}
+	}
+	cc.balancerWrapper.switchTo(newBalancerName)
+}
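+
+// Editorial note: to summarize the selection above, the balancer name is
+// chosen with the following precedence:
+//
+//	1. sc.lbConfig.name        // loadBalancingConfig from the service config
+//	2. grpclbName              // if any resolved address has Type GRPCLB
+//	3. *sc.LB                  // legacy loadBalancingPolicy string
+//	4. PickFirstBalancerName   // the default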
+
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
+	cc.mu.RLock()
+	r := cc.resolverWrapper
+	cc.mu.RUnlock()
+	if r == nil {
+		return
+	}
+	go r.resolveNow(o)
+}
+
+// ResetConnectBackoff wakes up all subchannels in transient failure and causes
+// them to attempt another connection immediately.  It also resets the backoff
+// times used for subsequent attempts regardless of the current state.
+//
+// In general, this function should not be used.  Typical service or network
+// outages result in a reasonable client reconnection strategy by default.
+// However, if a previously unavailable network becomes available, this may be
+// used to trigger an immediate reconnect.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func (cc *ClientConn) ResetConnectBackoff() {
+	cc.mu.Lock()
+	conns := cc.conns
+	cc.mu.Unlock()
+	for ac := range conns {
+		ac.resetConnectBackoff()
+	}
+}
+
+// Close tears down the ClientConn and all underlying connections.
+func (cc *ClientConn) Close() error {
+	defer cc.cancel()
+
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return ErrClientConnClosing
+	}
+	conns := cc.conns
+	cc.conns = nil
+	cc.csMgr.updateState(connectivity.Shutdown)
+
+	rWrapper := cc.resolverWrapper
+	cc.resolverWrapper = nil
+	bWrapper := cc.balancerWrapper
+	cc.mu.Unlock()
+
+	// The order of closing matters here since the balancer wrapper assumes
+	// the picker is closed before the balancer wrapper itself is closed.
+	cc.blockingpicker.close()
+	if bWrapper != nil {
+		bWrapper.close()
+	}
+	if rWrapper != nil {
+		rWrapper.close()
+	}
+
+	for ac := range conns {
+		ac.tearDown(ErrClientConnClosing)
+	}
+	ted := &channelz.TraceEventDesc{
+		Desc:     "Channel deleted",
+		Severity: channelz.CtInfo,
+	}
+	if cc.dopts.channelzParentID != nil {
+		ted.Parent = &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		}
+	}
+	channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may
+	// add a trace reference to the entity being deleted, and thus prevent it
+	// from being deleted right away.
+	channelz.RemoveEntry(cc.channelzID)
+
+	return nil
+}
+
+// addrConn is a network connection to a given address.
+type addrConn struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	cc     *ClientConn
+	dopts  dialOptions
+	acbw   balancer.SubConn
+	scopts balancer.NewSubConnOptions
+
+	// transport is set when there's a viable transport (note: ac state may
+	// not be READY as the LB channel health checking may require the server
+	// to report healthy to set ac to READY), and is reset to nil when the
+	// current transport should no longer be used to create a stream (e.g.
+	// after GoAway is received, the transport is closed, or ac has been
+	// torn down).
+	transport transport.ClientTransport // The current transport.
+
+	mu      sync.Mutex
+	curAddr resolver.Address   // The current address.
+	addrs   []resolver.Address // All addresses that the resolver resolved to.
+
+	// Use updateConnectivityState for updating addrConn's connectivity state.
+	state connectivity.State
+
+	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
+	resetBackoff chan struct{}
+
+	channelzID *channelz.Identifier
+	czData     *channelzData
+}
+
+// Note: this requires a lock on ac.mu.
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
+	if ac.state == s {
+		return
+	}
+	ac.state = s
+	channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
+	ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
+}
+
+// adjustParams updates parameters used to create transports upon
+// receiving a GoAway.
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
+	switch r {
+	case transport.GoAwayTooManyPings:
+		v := 2 * ac.dopts.copts.KeepaliveParams.Time
+		ac.cc.mu.Lock()
+		if v > ac.cc.mkp.Time {
+			ac.cc.mkp.Time = v
+		}
+		ac.cc.mu.Unlock()
+	}
+}
+
+func (ac *addrConn) resetTransport() {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return
+	}
+
+	addrs := ac.addrs
+	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+	// This will be the duration that dial gets to finish.
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return
+		}
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-ac.ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if ac.state != connectivity.Shutdown {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns an error if no address was
+// successfully connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
+	var firstConnErr error
+	for _, addr := range addrs {
+		ac.mu.Lock()
+		if ac.state == connectivity.Shutdown {
+			ac.mu.Unlock()
+			return errConnClosing
+		}
+
+		ac.cc.mu.RLock()
+		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+		ac.cc.mu.RUnlock()
+
+		copts := ac.dopts.copts
+		if ac.scopts.CredsBundle != nil {
+			copts.CredsBundle = ac.scopts.CredsBundle
+		}
+		ac.mu.Unlock()
+
+		channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+
+		err := ac.createTransport(addr, copts, connectDeadline)
+		if err == nil {
+			return nil
+		}
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
+		ac.cc.updateConnectionError(err)
+	}
+
+	// Couldn't connect to any address.
+	return firstConnErr
+}
+
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+	addr.ServerName = ac.cc.getServerName(addr)
+	hctx, hcancel := context.WithCancel(ac.ctx)
+
+	onClose := func(r transport.GoAwayReason) {
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		// adjust params based on GoAwayReason
+		ac.adjustParams(r)
+		if ac.state == connectivity.Shutdown {
+			// Already shut down.  tearDown() already cleared the transport and
+			// canceled hctx via ac.ctx, and we expected this connection to be
+			// closed, so do nothing here.
+			return
+		}
+		hcancel()
+		if ac.transport == nil {
+			// We're still connecting to this address, which could error.  Do
+			// not update the connectivity state or resolve; these will happen
+			// at the end of the tryAllAddrs connection loop in the event of an
+			// error.
+			return
+		}
+		ac.transport = nil
+		// Refresh the name resolver on any connection loss.
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// Always go idle and wait for the LB policy to initiate a new
+		// connection attempt.
+		ac.updateConnectivityState(connectivity.Idle, nil)
+	}
+
+	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+	defer cancel()
+	copts.ChannelzParentID = ac.channelzID
+
+	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
+	if err != nil {
+		if logger.V(2) {
+			logger.Infof("Creating new client transport to %q: %v", addr, err)
+		}
+		// newTr is either nil, or closed.
+		hcancel()
+		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
+		return err
+	}
+
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ac.state == connectivity.Shutdown {
+		// This can happen if the subConn was removed while in `Connecting`
+		// state. tearDown() would have set the state to `Shutdown`, but
+		// would not have closed the transport since ac.transport would not
+		// have been set at that point.
+		//
+		// We run this in a goroutine because newTr.Close() calls onClose()
+		// inline, which requires locking ac.mu.
+		//
+		// The error we pass to Close() is immaterial since there are no open
+		// streams at this point, so no trailers with error details will be sent
+		// out. We just need to pass a non-nil error.
+		go newTr.Close(transport.ErrConnClosing)
+		return nil
+	}
+	if hctx.Err() != nil {
+		// onClose was already called for this connection, but the connection
+		// was successfully established first.  Consider it a success and set
+		// the new state to Idle.
+		ac.updateConnectivityState(connectivity.Idle, nil)
+		return nil
+	}
+	ac.curAddr = addr
+	ac.transport = newTr
+	ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+	return nil
+}
+
+// startHealthCheck starts the health checking stream (RPC) to watch the health
+// stats of this connection if health checking is requested and configured.
+//
+// LB channel health checking is enabled when all requirements below are met:
+// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
+// 2. internal.HealthCheckFunc is set by importing the grpc/health package
+// 3. a service config with non-empty healthCheckConfig field is provided
+// 4. the load balancer requests it
+//
+// It sets addrConn to READY if the health checking stream is not started.
+//
+// Caller must hold ac.mu.
+func (ac *addrConn) startHealthCheck(ctx context.Context) {
+	var healthcheckManagingState bool
+	defer func() {
+		if !healthcheckManagingState {
+			ac.updateConnectivityState(connectivity.Ready, nil)
+		}
+	}()
+
+	if ac.cc.dopts.disableHealthCheck {
+		return
+	}
+	healthCheckConfig := ac.cc.healthCheckConfig()
+	if healthCheckConfig == nil {
+		return
+	}
+	if !ac.scopts.HealthCheckEnabled {
+		return
+	}
+	healthCheckFunc := ac.cc.dopts.healthCheckFunc
+	if healthCheckFunc == nil {
+		// The health package was not imported, so the health check function is not set.
+		//
+		// TODO: add a link to the health check doc in the error message.
+		channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
+		return
+	}
+
+	healthcheckManagingState = true
+
+	// Set up the health check helper functions.
+	currentTr := ac.transport
+	newStream := func(method string) (interface{}, error) {
+		ac.mu.Lock()
+		if ac.transport != currentTr {
+			ac.mu.Unlock()
+			return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
+		}
+		ac.mu.Unlock()
+		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
+	}
+	setConnectivityState := func(s connectivity.State, lastErr error) {
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		if ac.transport != currentTr {
+			return
+		}
+		ac.updateConnectivityState(s, lastErr)
+	}
+	// Start the health checking stream.
+	go func() {
+		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+		if err != nil {
+			if status.Code(err) == codes.Unimplemented {
+				channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
+			} else {
+				channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
+			}
+		}
+	}()
+}
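+
+// Sketch of how a client opts into the health checking described above,
+// assuming the server exposes the standard grpc.health.v1 service:
+//
+//	import _ "google.golang.org/grpc/health" // sets internal.HealthCheckFunc
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`))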
+
+func (ac *addrConn) resetConnectBackoff() {
+	ac.mu.Lock()
+	close(ac.resetBackoff)
+	ac.backoffIdx = 0
+	ac.resetBackoff = make(chan struct{})
+	ac.mu.Unlock()
+}
+
+// getReadyTransport returns the transport if ac's state is READY or nil if not.
+func (ac *addrConn) getReadyTransport() transport.ClientTransport {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ac.state == connectivity.Ready {
+		return ac.transport
+	}
+	return nil
+}
+
+// tearDown starts to tear down the addrConn.
+//
+// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
+// will leak. In most cases, call cc.removeAddrConn() instead.
+func (ac *addrConn) tearDown(err error) {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return
+	}
+	curTr := ac.transport
+	ac.transport = nil
+	// We have to set the state to Shutdown before anything else to prevent races
+	// between setting the state and logic that waits on context cancellation / etc.
+	ac.updateConnectivityState(connectivity.Shutdown, nil)
+	ac.cancel()
+	ac.curAddr = resolver.Address{}
+	if err == errConnDrain && curTr != nil {
+		// GracefulClose(...) may be executed multiple times when
+		// i) receiving multiple GoAway frames from the server; or
+		// ii) there are concurrent name resolver/Balancer triggered
+		// address removal and GoAway.
+		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
+		ac.mu.Unlock()
+		curTr.GracefulClose()
+		ac.mu.Lock()
+	}
+	channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+		Desc:     "Subchannel deleted",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEventDesc{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
+			Severity: channelz.CtInfo,
+		},
+	})
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from
+	// being deleted right away.
+	channelz.RemoveEntry(ac.channelzID)
+	ac.mu.Unlock()
+}
+
+func (ac *addrConn) getState() connectivity.State {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	return ac.state
+}
+
+func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+	ac.mu.Lock()
+	addr := ac.curAddr.Addr
+	ac.mu.Unlock()
+	return &channelz.ChannelInternalMetric{
+		State:                    ac.getState(),
+		Target:                   addr,
+		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
+	}
+}
+
+func (ac *addrConn) incrCallsStarted() {
+	atomic.AddInt64(&ac.czData.callsStarted, 1)
+	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+	atomic.AddInt64(&ac.czData.callsSucceeded, 1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	atomic.AddInt64(&ac.czData.callsFailed, 1)
+}
+
+type retryThrottler struct {
+	max    float64
+	thresh float64
+	ratio  float64
+
+	mu     sync.Mutex
+	tokens float64 // TODO(dfawley): replace with atomic and remove lock.
+}
+
+// throttle subtracts a retry token from the pool and returns whether a retry
+// should be throttled (disallowed) based upon the retry throttling policy in
+// the service config.
+func (rt *retryThrottler) throttle() bool {
+	if rt == nil {
+		return false
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens--
+	if rt.tokens < 0 {
+		rt.tokens = 0
+	}
+	return rt.tokens <= rt.thresh
+}
+
+func (rt *retryThrottler) successfulRPC() {
+	if rt == nil {
+		return
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens += rt.ratio
+	if rt.tokens > rt.max {
+		rt.tokens = rt.max
+	}
+}
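+
+// For reference, these fields are populated from the retryThrottling block of
+// a service config (gRFC A6). A sketch with illustrative numbers:
+//
+//	{
+//	  "retryThrottling": {
+//	    "maxTokens": 10,
+//	    "tokenRatio": 0.1
+//	  }
+//	}
+//
+// Per gRFC A6, retries are throttled once the token count drops to half of
+// maxTokens or below, which corresponds to the thresh field above.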
+
+type channelzChannel struct {
+	cc *ClientConn
+}
+
+func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
+	return c.cc.channelzMetric()
+}
+
+// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+// underlying connections within the specified timeout.
+//
+// Deprecated: This error is never returned by grpc and should not be
+// referenced by users.
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+	for _, rb := range cc.dopts.resolvers {
+		if scheme == rb.Scheme() {
+			return rb
+		}
+	}
+	return resolver.Get(scheme)
+}
+
+func (cc *ClientConn) updateConnectionError(err error) {
+	cc.lceMu.Lock()
+	cc.lastConnectionError = err
+	cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+	cc.lceMu.Lock()
+	defer cc.lceMu.Unlock()
+	return cc.lastConnectionError
+}
+
+func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
+	channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
+
+	var rb resolver.Builder
+	parsedTarget, err := parseTarget(cc.target)
+	if err != nil {
+		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
+	} else {
+		channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+		rb = cc.getResolver(parsedTarget.URL.Scheme)
+		if rb != nil {
+			cc.parsedTarget = parsedTarget
+			return rb, nil
+		}
+	}
+
+	// We are here because the user's dial target did not contain a scheme or
+	// specified an unregistered scheme. We should fall back to the default
+	// scheme, except when a custom dialer is specified, in which case we
+	// should always use the passthrough scheme.
+	defScheme := resolver.GetDefaultScheme()
+	channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme)
+	canonicalTarget := defScheme + ":///" + cc.target
+
+	parsedTarget, err = parseTarget(canonicalTarget)
+	if err != nil {
+		channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
+		return nil, err
+	}
+	channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+	rb = cc.getResolver(parsedTarget.URL.Scheme)
+	if rb == nil {
+		return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
+	}
+	cc.parsedTarget = parsedTarget
+	return rb, nil
+}
+
+// parseTarget uses RFC 3986 semantics to parse the given target into a
+// resolver.Target struct containing scheme, authority and url. Query
+// params are stripped from the endpoint.
+func parseTarget(target string) (resolver.Target, error) {
+	u, err := url.Parse(target)
+	if err != nil {
+		return resolver.Target{}, err
+	}
+
+	return resolver.Target{
+		Scheme:    u.Scheme,
+		Authority: u.Host,
+		URL:       *u,
+	}, nil
+}
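+
+// For example (illustrative target), parseTarget("dns://8.8.8.8/example.com:443")
+// yields Scheme "dns", Authority "8.8.8.8", and a URL whose path holds the
+// endpoint "/example.com:443".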
+
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
+	// Historically, we had two options for users to specify the serverName or
+	// authority for a channel. One was through the transport credentials
+	// (either in its constructor, or through the OverrideServerName() method).
+	// The other option (for cases where WithInsecure() dial option was used)
+	// was to use the WithAuthority() dial option.
+	//
+	// A few things have changed since:
+	// - `insecure` package with an implementation of the `TransportCredentials`
+	//   interface for the insecure case
+	// - WithAuthority() dial option support for secure credentials
+	authorityFromCreds := ""
+	if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+		authorityFromCreds = creds.Info().ServerName
+	}
+	authorityFromDialOption := dopts.authority
+	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+		return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+	}
+
+	switch {
+	case authorityFromDialOption != "":
+		return authorityFromDialOption, nil
+	case authorityFromCreds != "":
+		return authorityFromCreds, nil
+	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
+		// TODO: remove when the unix resolver implements optional interface to
+		// return channel authority.
+		return "localhost", nil
+	case strings.HasPrefix(endpoint, ":"):
+		return "localhost" + endpoint, nil
+	default:
+		// TODO: Define an optional interface on the resolver builder to return
+		// the channel authority given the user's dial target. For resolvers
+		// which don't implement this interface, we will use the endpoint from
+		// "scheme://authority/endpoint" as the default authority.
+		return endpoint, nil
+	}
+}
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 000000000..129776547
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/encoding"
+	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+)
+
+// baseCodec contains the functionality of both Codec and encoding.Codec, but
+// omits the name/string, which vary between the two and are not needed for
+// anything besides the registry in the encoding package.
+type baseCodec interface {
+	Marshal(v interface{}) ([]byte, error)
+	Unmarshal(data []byte, v interface{}) error
+}
+
+var _ baseCodec = Codec(nil)
+var _ baseCodec = encoding.Codec(nil)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+// Note that implementations of this interface must be thread safe;
+// a Codec's methods can be called from concurrent goroutines.
+//
+// Deprecated: use encoding.Codec instead.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// String returns the name of the Codec implementation.  This is unused by
+	// gRPC.
+	String() string
+}
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
new file mode 100644
index 000000000..4cdc6ba7c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not installed them, please do so first.
+#
+# We recommend running this script from $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes.
+#
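+# Example invocation (hypothetical proto file):
+#   ./codegen.sh helloworld.proto
+#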
+proto=$1
+protoc --go_out=plugins=grpc:. "$proto"
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 000000000..0b206a578
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package codes
+
+import "strconv"
+
+func (c Code) String() string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "Canceled"
+	case Unknown:
+		return "Unknown"
+	case InvalidArgument:
+		return "InvalidArgument"
+	case DeadlineExceeded:
+		return "DeadlineExceeded"
+	case NotFound:
+		return "NotFound"
+	case AlreadyExists:
+		return "AlreadyExists"
+	case PermissionDenied:
+		return "PermissionDenied"
+	case ResourceExhausted:
+		return "ResourceExhausted"
+	case FailedPrecondition:
+		return "FailedPrecondition"
+	case Aborted:
+		return "Aborted"
+	case OutOfRange:
+		return "OutOfRange"
+	case Unimplemented:
+		return "Unimplemented"
+	case Internal:
+		return "Internal"
+	case Unavailable:
+		return "Unavailable"
+	case DataLoss:
+		return "DataLoss"
+	case Unauthenticated:
+		return "Unauthenticated"
+	default:
+		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
new file mode 100644
index 000000000..11b106182
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -0,0 +1,244 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package codes defines the canonical error codes used by gRPC. It is
+// consistent across various languages.
+package codes // import "google.golang.org/grpc/codes"
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+type Code uint32
+
+const (
+	// OK is returned on success.
+	OK Code = 0
+
+	// Canceled indicates the operation was canceled (typically by the caller).
+	//
+	// The gRPC framework will generate this error code when cancellation
+	// is requested.
+	Canceled Code = 1
+
+	// Unknown error. An example of where this error may be returned is
+	// if a Status value received from another address space belongs to
+	// an error-space that is not known in this address space. Also
+	// errors raised by APIs that do not return enough error information
+	// may be converted to this error.
+	//
+	// The gRPC framework will generate this error code in the above two
+	// mentioned cases.
+	Unknown Code = 2
+
+	// InvalidArgument indicates client specified an invalid argument.
+	// Note that this differs from FailedPrecondition. It indicates arguments
+	// that are problematic regardless of the state of the system
+	// (e.g., a malformed file name).
+	//
+	// This error code will not be generated by the gRPC framework.
+	InvalidArgument Code = 3
+
+	// DeadlineExceeded means operation expired before completion.
+	// For operations that change the state of the system, this error may be
+	// returned even if the operation has completed successfully. For
+	// example, a successful response from a server could have been delayed
+	// long enough for the deadline to expire.
+	//
+	// The gRPC framework will generate this error code when the deadline is
+	// exceeded.
+	DeadlineExceeded Code = 4
+
+	// NotFound means some requested entity (e.g., file or directory) was
+	// not found.
+	//
+	// This error code will not be generated by the gRPC framework.
+	NotFound Code = 5
+
+	// AlreadyExists means an attempt to create an entity failed because one
+	// already exists.
+	//
+	// This error code will not be generated by the gRPC framework.
+	AlreadyExists Code = 6
+
+	// PermissionDenied indicates the caller does not have permission to
+	// execute the specified operation. It must not be used for rejections
+	// caused by exhausting some resource (use ResourceExhausted
+	// instead for those errors). It must not be
+	// used if the caller cannot be identified (use Unauthenticated
+	// instead for those errors).
+	//
+	// This error code will not be generated by the gRPC core framework,
+	// but authentication middleware is expected to use it.
+	PermissionDenied Code = 7
+
+	// ResourceExhausted indicates some resource has been exhausted, perhaps
+	// a per-user quota, or perhaps the entire file system is out of space.
+	//
+	// This error code will be generated by the gRPC framework in
+	// out-of-memory and server overload situations, or when a message is
+	// larger than the configured maximum size.
+	ResourceExhausted Code = 8
+
+	// FailedPrecondition indicates operation was rejected because the
+	// system is not in a state required for the operation's execution.
+	// For example, directory to be deleted may be non-empty, an rmdir
+	// operation is applied to a non-directory, etc.
+	//
+	// A litmus test that may help a service implementor in deciding
+	// between FailedPrecondition, Aborted, and Unavailable:
+	//  (a) Use Unavailable if the client can retry just the failing call.
+	//  (b) Use Aborted if the client should retry at a higher-level
+	//      (e.g., restarting a read-modify-write sequence).
+	//  (c) Use FailedPrecondition if the client should not retry until
+	//      the system state has been explicitly fixed. E.g., if an "rmdir"
+	//      fails because the directory is non-empty, FailedPrecondition
+	//      should be returned since the client should not retry unless
+	//      they have first fixed up the directory by deleting files from it.
+	//  (d) Use FailedPrecondition if the client performs conditional
+	//      REST Get/Update/Delete on a resource and the resource on the
+	//      server does not match the condition. E.g., conflicting
+	//      read-modify-write on the same resource.
+	//
+	// This error code will not be generated by the gRPC framework.
+	FailedPrecondition Code = 9
+
+	// Aborted indicates the operation was aborted, typically due to a
+	// concurrency issue like sequencer check failures, transaction aborts,
+	// etc.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	//
+	// This error code will not be generated by the gRPC framework.
+	Aborted Code = 10
+
+	// OutOfRange means operation was attempted past the valid range.
+	// E.g., seeking or reading past end of file.
+	//
+	// Unlike InvalidArgument, this error indicates a problem that may
+	// be fixed if the system state changes. For example, a 32-bit file
+	// system will generate InvalidArgument if asked to read at an
+	// offset that is not in the range [0,2^32-1], but it will generate
+	// OutOfRange if asked to read from an offset past the current
+	// file size.
+	//
+	// There is a fair bit of overlap between FailedPrecondition and
+	// OutOfRange. We recommend using OutOfRange (the more specific
+	// error) when it applies so that callers who are iterating through
+	// a space can easily look for an OutOfRange error to detect when
+	// they are done.
+	//
+	// This error code will not be generated by the gRPC framework.
+	OutOfRange Code = 11
+
+	// Unimplemented indicates operation is not implemented or not
+	// supported/enabled in this service.
+	//
+	// This error code will be generated by the gRPC framework. Most
+	// commonly, you will see this error code when a method implementation
+	// is missing on the server. It can also be generated for unknown
+	// compression algorithms or a disagreement as to whether an RPC should
+	// be streaming.
+	Unimplemented Code = 12
+
+	// Internal errors. This means some invariants expected by the underlying
+	// system have been broken. If you see one of these errors,
+	// something is very broken.
+	//
+	// This error code will be generated by the gRPC framework in several
+	// internal error conditions.
+	Internal Code = 13
+
+	// Unavailable indicates the service is currently unavailable.
+	// This is most likely a transient condition and may be corrected
+	// by retrying with a backoff. Note that it is not always safe to retry
+	// non-idempotent operations.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	//
+	// This error code will be generated by the gRPC framework during
+	// abrupt shutdown of a server process or network connection.
+	Unavailable Code = 14
+
+	// DataLoss indicates unrecoverable data loss or corruption.
+	//
+	// This error code will not be generated by the gRPC framework.
+	DataLoss Code = 15
+
+	// Unauthenticated indicates the request does not have valid
+	// authentication credentials for the operation.
+	//
+	// The gRPC framework will generate this error code when the
+	// authentication metadata is invalid or a Credentials callback fails;
+	// authentication middleware is also expected to generate it.
+	Unauthenticated Code = 16
+
+	_maxCode = 17
+)
+
+var strToCode = map[string]Code{
+	`"OK"`: OK,
+	`"CANCELLED"`:/* [sic] */ Canceled,
+	`"UNKNOWN"`:             Unknown,
+	`"INVALID_ARGUMENT"`:    InvalidArgument,
+	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
+	`"NOT_FOUND"`:           NotFound,
+	`"ALREADY_EXISTS"`:      AlreadyExists,
+	`"PERMISSION_DENIED"`:   PermissionDenied,
+	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
+	`"FAILED_PRECONDITION"`: FailedPrecondition,
+	`"ABORTED"`:             Aborted,
+	`"OUT_OF_RANGE"`:        OutOfRange,
+	`"UNIMPLEMENTED"`:       Unimplemented,
+	`"INTERNAL"`:            Internal,
+	`"UNAVAILABLE"`:         Unavailable,
+	`"DATA_LOSS"`:           DataLoss,
+	`"UNAUTHENTICATED"`:     Unauthenticated,
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+		if ci >= _maxCode {
+			return fmt.Errorf("invalid code: %q", ci)
+		}
+
+		*c = Code(ci)
+		return nil
+	}
+
+	if jc, ok := strToCode[string(b)]; ok {
+		*c = jc
+		return nil
+	}
+	return fmt.Errorf("invalid code: %q", string(b))
+}
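+
+// A small sketch of the two accepted encodings, numeric and quoted string:
+//
+//	var c Code
+//	_ = c.UnmarshalJSON([]byte(`14`))            // c == Unavailable
+//	_ = c.UnmarshalJSON([]byte(`"UNAVAILABLE"`)) // c == Unavailable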
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
new file mode 100644
index 000000000..4a8992642
--- /dev/null
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package connectivity defines connectivity semantics.
+// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
+package connectivity
+
+import (
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// State indicates the state of connectivity.
+// It can be the state of a ClientConn or SubConn.
+type State int
+
+func (s State) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		logger.Errorf("unknown connectivity state: %d", s)
+		return "INVALID_STATE"
+	}
+}
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle State = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+// ServingMode indicates the current mode of operation of the server.
+//
+// Only xDS enabled gRPC servers currently report their serving mode.
+type ServingMode int
+
+const (
+	// ServingModeStarting indicates that the server is starting up.
+	ServingModeStarting ServingMode = iota
+	// ServingModeServing indicates that the server contains all required
+	// configuration and is serving RPCs.
+	ServingModeServing
+	// ServingModeNotServing indicates that the server is not accepting new
+	// connections. Existing connections will be closed gracefully, allowing
+	// in-progress RPCs to complete. A server enters this mode when it does not
+	// contain the required configuration to serve RPCs.
+	ServingModeNotServing
+)
+
+func (s ServingMode) String() string {
+	switch s {
+	case ServingModeStarting:
+		return "STARTING"
+	case ServingModeServing:
+		return "SERVING"
+	case ServingModeNotServing:
+		return "NOT_SERVING"
+	default:
+		logger.Errorf("unknown serving mode: %d", s)
+		return "INVALID_MODE"
+	}
+}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 000000000..5feac3aa0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,291 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials implements various credentials supported by gRPC library,
+// which encapsulate all the state needed by a client to authenticate with a
+// server and make various assertions, e.g., about the client's identity, role,
+// or whether it is authorized to make a particular call.
+package credentials // import "google.golang.org/grpc/credentials"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/attributes"
+	icredentials "google.golang.org/grpc/internal/credentials"
+)
+
+// PerRPCCredentials defines the common interface for the credentials which need to
+// attach security information to every RPC (e.g., oauth2).
+type PerRPCCredentials interface {
+	// GetRequestMetadata gets the current request metadata, refreshing tokens
+	// if required. This should be called by the transport layer on each
+	// request, and the data should be populated in headers or other
+	// context. If a status code is returned, it will be used as the status for
+	// the RPC (restricted to an allowable set of codes as defined by gRFC
+	// A54). uri is the URI of the entry point for the request.  When supported
+	// by the underlying implementation, ctx can be used for timeout and
+	// cancellation. Additionally, RequestInfo data will be available via ctx
+	// to this call.  TODO(zhaoq): Define the set of the qualified keys instead
+	// of leaving it as an arbitrary string.
+	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+	// RequireTransportSecurity indicates whether the credentials requires
+	// transport security.
+	RequireTransportSecurity() bool
+}
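+
+// A minimal sketch of an implementation carrying a static bearer token
+// (tokenAuth is a hypothetical type; real code would refresh the token):
+//
+//	type tokenAuth struct{ token string }
+//
+//	func (t tokenAuth) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+//		return map[string]string{"authorization": "Bearer " + t.token}, nil
+//	}
+//
+//	func (t tokenAuth) RequireTransportSecurity() bool { return true }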
+
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+	// InvalidSecurityLevel indicates an invalid security level.
+	// The zero SecurityLevel value is invalid for backward compatibility.
+	InvalidSecurityLevel SecurityLevel = iota
+	// NoSecurity indicates a connection is insecure.
+	NoSecurity
+	// IntegrityOnly indicates a connection only provides integrity protection.
+	IntegrityOnly
+	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+	PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+	switch s {
+	case NoSecurity:
+		return "NoSecurity"
+	case IntegrityOnly:
+		return "IntegrityOnly"
+	case PrivacyAndIntegrity:
+		return "PrivacyAndIntegrity"
+	}
+	return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+	SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
+func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
+	return c
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, server name, etc.
+type ProtocolInfo struct {
+	// ProtocolVersion is the gRPC wire protocol version.
+	ProtocolVersion string
+	// SecurityProtocol is the security protocol in use.
+	SecurityProtocol string
+	// SecurityVersion is the security protocol version.  It is a static version string from the
+	// credentials, not a value that reflects per-connection protocol negotiation.  To retrieve
+	// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
+	//
+	// Deprecated: please use Peer.AuthInfo.
+	SecurityVersion string
+	// ServerName is the user-configured server name.
+	ServerName string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
+// information about the credentials in it.
+type AuthInfo interface {
+	AuthType() string
+}
+
+// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+// and the caller should not close rawConn.
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
+
+// TransportCredentials defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportCredentials interface {
+	// ClientHandshake does the authentication handshake specified by the
+	// corresponding authentication protocol on rawConn for clients. It returns
+	// the authenticated connection and the corresponding auth information
+	// about the connection.  The auth information should embed CommonAuthInfo
+	// to return additional information about the credentials. Implementations
+	// must use the provided context to implement timely cancellation.  gRPC
+	// will try to reconnect if the error returned is a temporary error
+	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).  If the
+	// returned error is a wrapper error, implementations should make sure that
+	// the error implements Temporary() to have the correct retry behaviors.
+	// Additionally, ClientHandshakeInfo data will be available via the context
+	// passed to this call.
+	//
+	// The second argument to this method is the `:authority` header value used
+	// while creating new streams on this connection after authentication
+	// succeeds. Implementations must use this as the server name during the
+	// authentication handshake.
+	//
+	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
+	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
+	// ServerHandshake does the authentication handshake for servers. It returns
+	// the authenticated connection and the corresponding auth information about
+	// the connection. The auth information should embed CommonAuthInfo to return additional information
+	// about the credentials.
+	//
+	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
+	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
+	// Info provides the ProtocolInfo of this TransportCredentials.
+	Info() ProtocolInfo
+	// Clone makes a copy of this TransportCredentials.
+	Clone() TransportCredentials
+	// OverrideServerName specifies the value used for the following:
+	// - verifying the hostname on the returned certificates
+	// - as SNI in the client's handshake to support virtual hosting
+	// - as the value for `:authority` header at stream creation time
+	//
+	// Deprecated: use grpc.WithAuthority instead. Will be supported
+	// throughout 1.x.
+	OverrideServerName(string) error
+}
+
+// Bundle is a combination of TransportCredentials and PerRPCCredentials.
+//
+// It also contains a mode switching method, so it can be used as a combination
+// of different credential policies.
+//
+// Bundle cannot be used together with individual TransportCredentials.
+// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
+//
+// This API is experimental.
+type Bundle interface {
+	// TransportCredentials returns the transport credentials from the Bundle.
+	//
+	// Implementations must return non-nil transport credentials. If transport
+	// security is not needed by the Bundle, implementations may choose to
+	// return insecure.NewCredentials().
+	TransportCredentials() TransportCredentials
+
+	// PerRPCCredentials returns the per-RPC credentials from the Bundle.
+	//
+	// May be nil if per-RPC credentials are not needed.
+	PerRPCCredentials() PerRPCCredentials
+
+	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+	// existing Bundle may cause races.
+	//
+	// NewWithMode returns nil if the requested mode is not supported.
+	NewWithMode(mode string) (Bundle, error)
+}
+
+// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
+//
+// This API is experimental.
+type RequestInfo struct {
+	// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
+	Method string
+	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+	AuthInfo AuthInfo
+}
+
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+	ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
+	return ri, ok
+}
+
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+	// Attributes contains the attributes for the address. It could be provided
+	// by the gRPC, resolver, balancer etc.
+	Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+	return chi
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the GetCommonAuthInfo() method,
+// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. Cases 2) and 3) exist for backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() CommonAuthInfo
+	}
+	if ai == nil {
+		return errors.New("AuthInfo is nil")
+	}
+	if ci, ok := ai.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
+			return nil
+		}
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+		}
+	}
+	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+	return nil
+}
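+
+// Usage sketch: a server handler that refuses to serve a connection lacking
+// privacy and integrity protection (peer is google.golang.org/grpc/peer):
+//
+//	p, ok := peer.FromContext(ctx)
+//	if !ok {
+//		return status.Error(codes.Internal, "no peer in context")
+//	}
+//	if err := credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
+//		return status.Error(codes.PermissionDenied, err.Error())
+//	}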
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
+// from GetSecurityValue(), which contains protocol specific security info. Note
+// the Value field will be sent to users of channelz requesting channel info, and
+// thus sensitive info should better be avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	Name  string
+	Value proto.Message
+}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 000000000..82bee1443
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns a credentials which disables transport security.
+//
+// Note that using this credentials with per-RPC credentials which require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
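+
+// Typical usage (sketch):
+//
+//	conn, err := grpc.Dial("localhost:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()))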
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC credentials.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation, as the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 000000000..877b7cd21
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,236 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+
+	credinternal "google.golang.org/grpc/internal/credentials"
+)
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+	CommonAuthInfo
+	// This API is experimental.
+	SPIFFEID *url.URL
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup[t.State.CipherSuite],
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+		ServerName:       c.config.ServerName,
+	}
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+	// use local cfg to avoid clobbering ServerName if using multiple endpoints
+	cfg := credinternal.CloneTLSConfig(c.config)
+	if cfg.ServerName == "" {
+		serverName, _, err := net.SplitHostPort(authority)
+		if err != nil {
+			// If the authority had no host port or if the authority cannot be parsed, use it as-is.
+			serverName = authority
+		}
+		cfg.ServerName = serverName
+	}
+	conn := tls.Client(rawConn, cfg)
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+		close(errChannel)
+	}()
+	select {
+	case err := <-errChannel:
+		if err != nil {
+			conn.Close()
+			return nil, nil, err
+		}
+	case <-ctx.Done():
+		conn.Close()
+		return nil, nil, ctx.Err()
+	}
+	tlsInfo := TLSInfo{
+		State: conn.ConnectionState(),
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, c.config)
+	if err := conn.Handshake(); err != nil {
+		conn.Close()
+		return nil, nil, err
+	}
+	tlsInfo := TLSInfo{
+		State: conn.ConnectionState(),
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+	return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+	c.config.ServerName = serverNameOverride
+	return nil
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
+	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+	return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the provided root
+// certificate authority certificate(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (eg: for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. :authority header
+// field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the provided root
+// certificate authority certificate file(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (eg: for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. :authority header
+// field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := os.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
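+
+// Usage sketch (assumes a CA bundle at "ca.pem"):
+//
+//	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	conn, err := grpc.Dial(target, grpc.WithTransportCredentials(creds))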
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TLSChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	StandardName      string
+	LocalCertificate  []byte
+	RemoteCertificate []byte
+}
+
+var cipherSuiteLookup = map[uint16]string{
+	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
+	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
+	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+	tls.TLS_AES_128_GCM_SHA256:                  "TLS_AES_128_GCM_SHA256",
+	tls.TLS_AES_256_GCM_SHA384:                  "TLS_AES_256_GCM_SHA384",
+	tls.TLS_CHACHA20_POLY1305_SHA256:            "TLS_CHACHA20_POLY1305_SHA256",
+}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
new file mode 100644
index 000000000..4866da101
--- /dev/null
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -0,0 +1,637 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/channelz"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/internal"
+	internalbackoff "google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+)
+
+func init() {
+	internal.AddGlobalDialOptions = func(opt ...DialOption) {
+		extraDialOptions = append(extraDialOptions, opt...)
+	}
+	internal.ClearGlobalDialOptions = func() {
+		extraDialOptions = nil
+	}
+	internal.WithBinaryLogger = withBinaryLogger
+	internal.JoinDialOptions = newJoinDialOption
+}
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+	unaryInt  UnaryClientInterceptor
+	streamInt StreamClientInterceptor
+
+	chainUnaryInts  []UnaryClientInterceptor
+	chainStreamInts []StreamClientInterceptor
+
+	cp                          Compressor
+	dc                          Decompressor
+	bs                          internalbackoff.Strategy
+	block                       bool
+	returnLastError             bool
+	timeout                     time.Duration
+	scChan                      <-chan ServiceConfig
+	authority                   string
+	binaryLogger                binarylog.Logger
+	copts                       transport.ConnectOptions
+	callOptions                 []CallOption
+	channelzParentID            *channelz.Identifier
+	disableServiceConfig        bool
+	disableRetry                bool
+	disableHealthCheck          bool
+	healthCheckFunc             internal.HealthChecker
+	minConnectTimeout           func() time.Duration
+	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
+	defaultServiceConfigRawJSON *string
+	resolvers                   []resolver.Builder
+}
+
+// DialOption configures how we set up the connection.
+type DialOption interface {
+	apply(*dialOptions)
+}
+
+var extraDialOptions []DialOption
+
+// EmptyDialOption does not alter the dial configuration. It can be embedded in
+// another structure to build custom dial options.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type EmptyDialOption struct{}
+
+func (EmptyDialOption) apply(*dialOptions) {}
+
+// funcDialOption wraps a function that modifies dialOptions into an
+// implementation of the DialOption interface.
+type funcDialOption struct {
+	f func(*dialOptions)
+}
+
+func (fdo *funcDialOption) apply(do *dialOptions) {
+	fdo.f(do)
+}
+
+func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
+	return &funcDialOption{
+		f: f,
+	}
+}
+
+type joinDialOption struct {
+	opts []DialOption
+}
+
+func (jdo *joinDialOption) apply(do *dialOptions) {
+	for _, opt := range jdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinDialOption(opts ...DialOption) DialOption {
+	return &joinDialOption{opts: opts}
+}
+
+// WithWriteBufferSize determines how much data can be batched before doing a
+// write on the wire. The corresponding memory allocation for this buffer will
+// be twice the size to keep syscalls low. The default value for this buffer is
+// 32KB.
+//
+// Zero or negative values will disable the write buffer such that each write
+// will be on the underlying connection. Note: A Send call may not directly
+// translate to a write.
+func WithWriteBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.WriteBufferSize = s
+	})
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines
+// how much data can be read at most for each read syscall.
+//
+// The default value for this buffer is 32KB. Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
+func WithReadBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.ReadBufferSize = s
+	})
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial
+// window size on a stream. The lower bound for window size is 64K and any value
+// smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialWindowSize = s
+	})
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for
+// initial window size on a connection. The lower bound for window size is 64K
+// and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialConnWindowSize = s
+	})
+}
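
The four buffer and window options above compose like any other DialOption. A sketch with illustrative sizes:

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialTuned(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithWriteBufferSize(64*1024),     // batch up to 64KB before each write
		grpc.WithReadBufferSize(64*1024),      // read up to 64KB per read syscall
		grpc.WithInitialWindowSize(1<<20),     // 1MB per-stream window (lower bound 64K)
		grpc.WithInitialConnWindowSize(1<<20), // 1MB per-connection window
	)
}
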
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the
+// client can receive.
+//
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.  Will
+// be supported throughout 1.x.
+func WithMaxMsgSize(s int) DialOption {
+	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default
+// CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.callOptions = append(o.callOptions, cos...)
+	})
+}
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and
+// unmarshaling.
+//
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.  Will be
+// supported throughout 1.x.
+func WithCodec(c Codec) DialOption {
+	return WithDefaultCallOptions(CallCustomCodec(c))
+}
+
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression. It has lower priority than the compressor set by the
+// UseCompressor CallOption.
+//
+// Deprecated: use UseCompressor instead.  Will be supported throughout 1.x.
+func WithCompressor(cp Compressor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.cp = cp
+	})
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression.  If incoming response messages are encoded
+// using the decompressor's Type(), it will be used.  Otherwise, the message
+// encoding will be used to look up the compressor registered via
+// encoding.RegisterCompressor, which will then be used to decompress the
+// message.  If no compressor is registered for the encoding, an Unimplemented
+// status error will be returned.
+//
+// Deprecated: use encoding.RegisterCompressor instead.  Will be supported
+// throughout 1.x.
+func WithDecompressor(dc Decompressor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.dc = dc
+	})
+}
+
+// WithServiceConfig returns a DialOption which sets a channel from which the
+// service configuration is read.
+//
+// Deprecated: service config should be received through name resolver or via
+// WithDefaultServiceConfig, as specified at
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md.  Will be
+// removed in a future 1.x release.
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.scChan = c
+	})
+}
+
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+func WithConnectParams(p ConnectParams) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = internalbackoff.Exponential{Config: p.Backoff}
+		o.minConnectTimeout = func() time.Duration {
+			return p.MinConnectTimeout
+		}
+	})
+}
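
A sketch of the "DefaultConfig as a base" pattern the comment above suggests; the delays are illustrative:

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func dialWithBackoff(target string, extra ...grpc.DialOption) (*grpc.ClientConn, error) {
	// Start from backoff.DefaultConfig and override only MaxDelay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second
	opts := append([]grpc.DialOption{grpc.WithConnectParams(grpc.ConnectParams{
		Backoff:           bc,
		MinConnectTimeout: 10 * time.Second,
	})}, extra...)
	return grpc.Dial(target, opts...)
}
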
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+	bc := backoff.DefaultConfig
+	bc.MaxDelay = b.MaxDelay
+	return withBackoff(internalbackoff.Exponential{Config: bc})
+}
+
+// withBackoff sets the backoff strategy used for connectRetryNum after a failed
+// connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs internalbackoff.Strategy) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = bs
+	})
+}
+
+// WithBlock returns a DialOption which makes callers of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+	})
+}
+
+// WithReturnConnectionError returns a DialOption which makes the client connection
+// return a string containing both the last connection error that occurred and
+// the context.DeadlineExceeded error.
+// Implies WithBlock().
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithReturnConnectionError() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+		o.returnLastError = true
+	})
+}
+
+// WithInsecure returns a DialOption which disables transport security for this
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause grpc.Dial() to fail.
+//
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
+// instead. Will be supported throughout 1.x.
+func WithInsecure() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = insecure.NewCredentials()
+	})
+}
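
Since WithInsecure is deprecated, a sketch of the equivalent recommended spelling:

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialPlaintext(target string) (*grpc.ClientConn, error) {
	// Equivalent to the deprecated grpc.WithInsecure().
	return grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
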
+
+// WithNoProxy returns a DialOption which disables the use of proxies for this
+// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithNoProxy() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.UseProxy = false
+	})
+}
+
+// WithTransportCredentials returns a DialOption which configures connection-level
+// security credentials (e.g., TLS/SSL). This should not be used together
+// with WithCredentialsBundle.
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	})
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	})
+}
+
+// WithCredentialsBundle returns a DialOption to set a credentials bundle for
+// the ClientConn. This should not be used together with
+// WithTransportCredentials.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithCredentialsBundle(b credentials.Bundle) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.CredsBundle = b
+	})
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: use DialContext instead of Dial and context.WithTimeout
+// instead.  Will be supported throughout 1.x.
+func WithTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.timeout = d
+	})
+}
+
+// WithContextDialer returns a DialOption that sets a dialer to create
+// connections. If FailOnNonTempDialError() is set to true, and an error is
+// returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.Dialer = f
+	})
+}
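
A minimal sketch of a custom dialer; gRPC passes the resolved address and a context carrying the dial deadline:

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

func withTCPDialer() grpc.DialOption {
	d := &net.Dialer{}
	return grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		return d.DialContext(ctx, "tcp", addr)
	})
}
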
+
+func init() {
+	internal.WithHealthCheckFunc = withHealthCheckFunc
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses. If FailOnNonTempDialError() is set to true, and an error
+// is returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Deprecated: use WithContextDialer instead.  Will be supported throughout
+// 1.x.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+	return WithContextDialer(
+		func(ctx context.Context, addr string) (net.Conn, error) {
+			if deadline, ok := ctx.Deadline(); ok {
+				return f(addr, time.Until(deadline))
+			}
+			return f(addr, 0)
+		})
+}
+
+// WithStatsHandler returns a DialOption that specifies the stats handler for
+// all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
+	})
+}
+
+// withBinaryLogger returns a DialOption that specifies the binary logger for
+// this ClientConn.
+func withBinaryLogger(bl binarylog.Logger) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.binaryLogger = bl
+	})
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
+// non-temporary dial errors. If f is true, and the dialer returns a non-temporary
+// error, gRPC will fail the connection to the network address and won't try to
+// reconnect. The default value of FailOnNonTempDialError is false.
+//
+// FailOnNonTempDialError only affects the initial dial, and does not do
+// anything useful unless you are also using WithBlock().
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FailOnNonTempDialError(f bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.FailOnNonTempDialError = f
+	})
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all
+// the RPCs.
+func WithUserAgent(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.UserAgent = s
+	})
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
+// for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+	if kp.Time < internal.KeepaliveMinPingTime {
+		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+		kp.Time = internal.KeepaliveMinPingTime
+	}
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.KeepaliveParams = kp
+	})
+}
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
+// unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.unaryInt = f
+	})
+}
+
+// WithChainUnaryInterceptor returns a DialOption that specifies the chained
+// interceptor for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithUnaryInterceptor will always be prepended to the chain.
+func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor for
+// streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.streamInt = f
+	})
+}
+
+// WithChainStreamInterceptor returns a DialOption that specifies the chained
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithStreamInterceptor will always be prepended to the chain.
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
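
A sketch of the chaining order described above (names are illustrative): with grpc.WithChainUnaryInterceptor(outer, inner), outer runs first and inner sits closest to the real invocation.

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

func outer(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("outer: before %s", method)
	return invoker(ctx, method, req, reply, cc, opts...) // calls inner next
}

func inner(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("inner: before %s", method)
	return invoker(ctx, method, req, reply, cc, opts...) // performs the RPC
}
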
+
+// WithAuthority returns a DialOption that specifies the value to be used as the
+// :authority pseudo-header and as the server name in authentication handshake.
+func WithAuthority(a string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.authority = a
+	})
+}
+
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of
+// the current ClientConn's parent. This function is used in nested channel creation
+// (e.g. grpclb dial).
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithChannelzParentID(id *channelz.Identifier) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.channelzParentID = id
+	})
+}
+
+// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+//
+// Note that this dial option only disables service config from the resolver. If
+// a default service config is provided, gRPC will use the default service config.
+func WithDisableServiceConfig() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableServiceConfig = true
+	})
+}
+
+// WithDefaultServiceConfig returns a DialOption that configures the default
+// service config, which will be used in cases where:
+//
+// 1. WithDisableServiceConfig is also used, or
+//
+// 2. The name resolver does not provide a service config or provides an
+// invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
+func WithDefaultServiceConfig(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultServiceConfigRawJSON = &s
+	})
+}
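
A sketch of a default service config enabling retries for a hypothetical echo.Echo service; the JSON schema is defined in the service_config doc linked above:

import "google.golang.org/grpc"

const defaultSC = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func dialWithDefaults(target string, creds grpc.DialOption) (*grpc.ClientConn, error) {
	return grpc.Dial(target, creds, grpc.WithDefaultServiceConfig(defaultSC))
}
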
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them.  This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+func WithDisableRetry() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableRetry = true
+	})
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.MaxHeaderListSize = &s
+	})
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check function.
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		healthCheckFunc: internal.HealthCheckFunc,
+		copts: transport.ConnectOptions{
+			WriteBufferSize: defaultWriteBufSize,
+			ReadBufferSize:  defaultReadBufSize,
+			UseProxy:        true,
+		},
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register.  They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 000000000..0022859ad
--- /dev/null
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+/*
+Package grpc implements an RPC system called gRPC.
+
+See grpc.io for more information about gRPC.
+*/
+package grpc // import "google.golang.org/grpc"
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
new file mode 100644
index 000000000..07a586135
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package encoding defines the interface for the compressor and codec, and
+// functions to register and retrieve compressors and codecs.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package encoding
+
+import (
+	"io"
+	"strings"
+
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
+
+// Compressor is used for compressing and decompressing when sending or
+// receiving messages.
+type Compressor interface {
+	// Compress returns a WriteCloser wc such that data written to wc is
+	// compressed and written to w.  If an error occurs while initializing
+	// the compressor, that error is returned instead.
+	Compress(w io.Writer) (io.WriteCloser, error)
+	// Decompress reads data from r, decompresses it, and provides the
+	// uncompressed data via the returned io.Reader.  If an error occurs while
+	// initializing the decompressor, that error is returned instead.
+	Decompress(r io.Reader) (io.Reader, error)
+	// Name is the name of the compression codec and is used to set the content
+	// coding header.  The result must be static; the result cannot change
+	// between calls.
+	Name() string
+	// If a Compressor implements
+	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
+	// to determine the size of the buffer allocated for the result of decompression.
+	// Return -1 to indicate unknown size.
+	//
+	// Experimental
+	//
+	// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+	// later release.
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name.  It can
+// be activated when sending an RPC via grpc.UseCompressor().  It will be
+// automatically accessed when receiving a message based on the content coding
+// header.  Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+	registeredCompressor[c.Name()] = c
+	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
+		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
+	}
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+	return registeredCompressor[name]
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages.  Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission.  The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+var registeredCodecs = make(map[string]Codec)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec.  This
+// is case-insensitive, and is stored and looked up as lowercase.  If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodec(codec Codec) {
+	if codec == nil {
+		panic("cannot register a nil Codec")
+	}
+	if codec.Name() == "" {
+		panic("cannot register Codec with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodec(contentSubtype string) Codec {
+	return registeredCodecs[contentSubtype]
+}
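
A sketch of a custom Codec satisfying the interface above, registered at init time as the NOTE requires (the JSON codec here is illustrative):

package jsoncodec // hypothetical package

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json" }

func init() {
	// Must run during initialization; RegisterCodec is not thread-safe.
	encoding.RegisterCodec(codec{})
}
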
diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
new file mode 100644
index 000000000..a3bb173c2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gzip implements and registers the gzip compressor
+// during initialization.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package gzip
+
+import (
+	"compress/gzip"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the gzip compressor.
+const Name = "gzip"
+
+func init() {
+	c := &compressor{}
+	c.poolCompressor.New = func() interface{} {
+		return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor}
+	}
+	encoding.RegisterCompressor(c)
+}
+
+type writer struct {
+	*gzip.Writer
+	pool *sync.Pool
+}
+
+// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported).
+// NOTE: this function must only be called during initialization time (i.e. in an init() function),
+// and is not thread-safe.
+//
+// The error returned will be nil if the specified level is valid.
+func SetLevel(level int) error {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return fmt.Errorf("grpc: invalid gzip compression level: %d", level)
+	}
+	c := encoding.GetCompressor(Name).(*compressor)
+	c.poolCompressor.New = func() interface{} {
+		w, err := gzip.NewWriterLevel(io.Discard, level)
+		if err != nil {
+			panic(err)
+		}
+		return &writer{Writer: w, pool: &c.poolCompressor}
+	}
+	return nil
+}
+
+func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
+	z := c.poolCompressor.Get().(*writer)
+	z.Writer.Reset(w)
+	return z, nil
+}
+
+func (z *writer) Close() error {
+	defer z.pool.Put(z)
+	return z.Writer.Close()
+}
+
+type reader struct {
+	*gzip.Reader
+	pool *sync.Pool
+}
+
+func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
+	z, inPool := c.poolDecompressor.Get().(*reader)
+	if !inPool {
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil
+	}
+	if err := z.Reset(r); err != nil {
+		c.poolDecompressor.Put(z)
+		return nil, err
+	}
+	return z, nil
+}
+
+func (z *reader) Read(p []byte) (n int, err error) {
+	n, err = z.Reader.Read(p)
+	if err == io.EOF {
+		z.pool.Put(z)
+	}
+	return n, err
+}
+
+// RFC1952 specifies that the last four bytes "contains the size of
+// the original (uncompressed) input data modulo 2^32."
+// gRPC has a max message size of 2GB so we don't need to worry about wraparound.
+func (c *compressor) DecompressedSize(buf []byte) int {
+	last := len(buf)
+	if last < 4 {
+		return -1
+	}
+	return int(binary.LittleEndian.Uint32(buf[last-4 : last]))
+}
+
+func (c *compressor) Name() string {
+	return Name
+}
+
+type compressor struct {
+	poolCompressor   sync.Pool
+	poolDecompressor sync.Pool
+}
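
Importing this package is enough to register the compressor; a sketch of enabling it per call (the UnaryEcho stub is hypothetical):

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // init() registers the compressor
)

// compressed returns CallOptions enabling gzip for a single RPC, e.g.
// client.UnaryEcho(ctx, req, compressed()...) on a hypothetical stub.
func compressed() []grpc.CallOption {
	return []grpc.CallOption{grpc.UseCompressor(gzip.Name)}
}
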
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 000000000..3009b35af
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodec(codec{})
+}
+
+// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type codec struct{}
+
+func (codec) Marshal(v interface{}) ([]byte, error) {
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+	}
+	return proto.Marshal(vv)
+}
+
+func (codec) Unmarshal(data []byte, v interface{}) error {
+	vv, ok := v.(proto.Message)
+	if !ok {
+		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
+	}
+	return proto.Unmarshal(data, vv)
+}
+
+func (codec) Name() string {
+	return Name
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 000000000..8358dd6e2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+	name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + c.name + "]"}, args...)
+	grpclog.InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + c.name + "]"}, args...)
+	grpclog.WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + c.name + "]"}, args...)
+	grpclog.ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...interface{}) {
+	args = append([]interface{}{"[" + c.name + "]"}, args...)
+	grpclog.FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...interface{}) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...interface{}) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...interface{}) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...interface{}) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...interface{}) {
+	c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...interface{}) {
+	c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...interface{}) {
+	c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...interface{}) {
+	c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...interface{}) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...interface{}) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...interface{}) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...interface{}) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+	return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a component
+// with the given name already exists, nothing new is created and the existing
+// one is returned. SetLoggerV2 will panic if it is called with a logger created
+// by Component.
+func Component(componentName string) DepthLoggerV2 {
+	if cData, ok := cache[componentName]; ok {
+		return cData
+	}
+	c := &componentData{componentName}
+	cache[componentName] = c
+	return c
+}
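
A sketch of per-component logging with the cached loggers above; "otelgrpc" is an illustrative component name:

import "google.golang.org/grpc/grpclog"

var logger = grpclog.Component("otelgrpc")

func noteRetry() {
	if logger.V(2) { // only log at verbosity >= 2
		logger.Info("retrying export")
	}
}
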
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
new file mode 100644
index 000000000..c8bb2be34
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// All logs in the transport and grpclb packages only go to verbose level 2.
+// All logs in other grpc packages are logged regardless of the verbosity level.
+//
+// In the default logger, the severity level can be set by the environment
+// variable GRPC_GO_LOG_SEVERITY_LEVEL, and the verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import (
+	"os"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+func init() {
+	SetLoggerV2(newLoggerV2())
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+	return grpclog.Logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...interface{}) {
+	grpclog.Logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...interface{}) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+	grpclog.Logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+	grpclog.Logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+	grpclog.Logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+	grpclog.Logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	grpclog.Logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	grpclog.Logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	grpclog.Logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	grpclog.Logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	grpclog.Logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	grpclog.Logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+	grpclog.Logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+	grpclog.Logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 000000000..ef06a4822
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import "google.golang.org/grpc/internal/grpclog"
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	grpclog.Logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+	Logger
+}
+
+func (g *loggerWrapper) Info(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Warningln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Error(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Errorln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 000000000..5de66e40d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,258 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...interface{})
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...interface{})
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...interface{})
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...interface{})
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...interface{})
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...interface{})
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...interface{})
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...interface{})
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...interface{})
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...interface{})
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// SetLoggerV2 sets the logger that is used in grpc to a V2 logger.
+// It is not mutex-protected and should be called before any gRPC functions.
+func SetLoggerV2(l LoggerV2) {
+	if _, ok := l.(*componentData); ok {
+		panic("cannot use component logger as grpclog logger")
+	}
+	grpclog.Logger = l
+	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
+}
+
+const (
+	// infoLog indicates Info severity.
+	infoLog int = iota
+	// warningLog indicates Warning severity.
+	warningLog
+	// errorLog indicates Error severity.
+	errorLog
+	// fatalLog indicates Fatal severity.
+	fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+	m          []*log.Logger
+	v          int
+	jsonFormat bool
+}
+
+// NewLoggerV2 creates a loggerV2 with the provided writers.
+// Fatal logs will be written to errorW, warningW, infoW, followed by os.Exit(1).
+// Error logs will be written to errorW, warningW and infoW.
+// Warning logs will be written to warningW and infoW.
+// Info logs will be written to infoW.
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+}
+
+// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
+// verbosity level.
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
+}
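
A sketch of wiring in a custom logger via the constructors above; the writer choices and verbosity are illustrative:

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Drop INFO and WARNING output, send ERROR/FATAL to stderr, and raise
	// the verbosity to 2; per SetLoggerV2's contract this must run before
	// any other gRPC activity.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(io.Discard, io.Discard, os.Stderr, 2))
}
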
+
+type loggerV2Config struct {
+	verbose    int
+	jsonFormat bool
+}
+
+func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
+	var m []*log.Logger
+	flag := log.LstdFlags
+	if c.jsonFormat {
+		flag = 0
+	}
+	m = append(m, log.New(infoW, "", flag))
+	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+	m = append(m, log.New(ew, "", flag))
+	m = append(m, log.New(ew, "", flag))
+	return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+}
+
+// newLoggerV2 creates a loggerV2 to be used as default logger.
+// All logs are written to stderr.
+func newLoggerV2() LoggerV2 {
+	errorW := io.Discard
+	warningW := io.Discard
+	infoW := io.Discard
+
+	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
+	switch logLevel {
+	case "", "ERROR", "error": // If env is unset, set level to ERROR.
+		errorW = os.Stderr
+	case "WARNING", "warning":
+		warningW = os.Stderr
+	case "INFO", "info":
+		infoW = os.Stderr
+	}
+
+	var v int
+	vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
+	if vl, err := strconv.Atoi(vLevel); err == nil {
+		v = vl
+	}
+
+	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
+		verbose:    v,
+		jsonFormat: jsonFormat,
+	})
+}
+
+func (g *loggerT) output(severity int, s string) {
+	sevStr := severityName[severity]
+	if !g.jsonFormat {
+		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+		return
+	}
+	// TODO: we can also include the logging component, but that needs more
+	// (API) changes.
+	b, _ := json.Marshal(map[string]string{
+		"severity": sevStr,
+		"message":  s,
+	})
+	g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...interface{}) {
+	g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...interface{}) {
+	g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...interface{}) {
+	g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...interface{}) {
+	g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...interface{}) {
+	g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...interface{}) {
+	g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...interface{}) {
+	g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...interface{}) {
+	g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...interface{}) {
+	g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...interface{}) {
+	g.output(fatalLog, fmt.Sprint(args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...interface{}) {
+	g.output(fatalLog, fmt.Sprintln(args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...interface{}) {
+	g.output(fatalLog, fmt.Sprintf(format, args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+	return l <= g.v
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	LoggerV2
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	FatalDepth(depth int, args ...interface{})
+}
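
As an aside, the wiring above can also be driven programmatically rather than via the environment. A minimal sketch using gRPC's public grpclog API (the writer choices and the verbosity level here are illustrative):

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Discard INFO and WARNING, surface only errors, and enable verbosity 2.
	l := grpclog.NewLoggerV2WithVerbosity(io.Discard, io.Discard, os.Stderr, 2)
	grpclog.SetLoggerV2(l) // install before any gRPC activity starts

	if l.V(2) { // true because 2 <= configured verbosity
		l.Info("verbose logging enabled")
	}
}
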
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
new file mode 100644
index 000000000..bb96ef57b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+)
+
+// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
+type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
+
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When unary interceptors are set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+
+// Streamer is called by StreamClientInterceptor to create a ClientStream.
+type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
+
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When stream
+// interceptors are set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
+type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
+
+// UnaryServerInfo contains various information about a unary RPC on the
+// server side. All per-RPC information may be mutated by the interceptor.
+type UnaryServerInfo struct {
+	// Server is the service implementation the user provides. This is read-only.
+	Server interface{}
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC.
+//
+// If a UnaryHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
+// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
+// of the service method implementation. It is the responsibility of the interceptor to invoke handler
+// to complete the RPC.
+type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+
+// StreamServerInfo contains various information about a streaming RPC on the
+// server side. All per-RPC information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
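
As a usage illustration (not part of the vendored file), a unary client interceptor with the signature above could time every RPC. The interceptor itself must call invoker to complete the call, and it is attached via the WithUnaryInterceptor dial option mentioned in the comments; the target address and credentials below are placeholders:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timingInterceptor logs the duration of every unary RPC.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...) // complete the RPC
	log.Printf("rpc=%s dur=%v err=%v", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(timingInterceptor))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
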
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 000000000..5fc0ee3da
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
+
+// Exponential implements exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return bc.Config.BaseDelay
+	}
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.Config.Multiplier
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
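
Since this package is internal to gRPC and cannot be imported, here is a standalone sketch that mirrors the algorithm above with the configuration knobs as explicit parameters:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func backoff(retries int, base, max time.Duration, multiplier, jitter float64) time.Duration {
	if retries == 0 {
		return base
	}
	cur := float64(base)
	for cur < float64(max) && retries > 0 {
		cur *= multiplier // grow exponentially, capped below
		retries--
	}
	if cur > float64(max) {
		cur = float64(max)
	}
	// Randomize by +/- jitter so a burst of failures doesn't retry in lockstep.
	cur *= 1 + jitter*(rand.Float64()*2-1)
	if cur < 0 {
		return 0
	}
	return time.Duration(cur)
}

func main() {
	// With gRPC's documented defaults (1s base, 1.6x multiplier, 0.2 jitter, 120s cap):
	fmt.Println(backoff(3, time.Second, 120*time.Second, 1.6, 0.2)) // ~4.1s +/- 20%
}
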
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
new file mode 100644
index 000000000..08666f62a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -0,0 +1,384 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gracefulswitch implements a graceful switch load balancer.
+package gracefulswitch
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+)
+
+var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
+var _ balancer.Balancer = (*Balancer)(nil)
+
+// NewBalancer returns a graceful switch Balancer.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
+	return &Balancer{
+		cc:    cc,
+		bOpts: opts,
+	}
+}
+
+// Balancer is a utility to gracefully switch from one balancer to
+// a new balancer. It implements the balancer.Balancer interface.
+type Balancer struct {
+	bOpts balancer.BuildOptions
+	cc    balancer.ClientConn
+
+	// mu protects the following fields and all fields within balancerCurrent
+	// and balancerPending. mu does not need to be held when calling into the
+	// child balancers, as all calls into these children happen only as a direct
+	// result of a call into the gracefulSwitchBalancer, which are also
+	// guaranteed to be synchronous. There is one exception: an UpdateState call
+	// from a child balancer when current and pending are populated can lead to
+	// calling Close() on the current. To prevent that racing with an
+	// UpdateSubConnState from the channel, we hold currentMu during Close and
+	// UpdateSubConnState calls.
+	mu              sync.Mutex
+	balancerCurrent *balancerWrapper
+	balancerPending *balancerWrapper
+	closed          bool // set to true when this balancer is closed
+
+	// currentMu must be locked before mu. This mutex guards against this
+	// sequence of events: UpdateSubConnState() called, finds the
+	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
+	// balancerCurrent before the UpdateSubConnState is called on the
+	// balancerCurrent.
+	currentMu sync.Mutex
+}
+
+// swap swaps out the current lb with the pending lb and updates the ClientConn.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) swap() {
+	gsb.cc.UpdateState(gsb.balancerPending.lastState)
+	cur := gsb.balancerCurrent
+	gsb.balancerCurrent = gsb.balancerPending
+	gsb.balancerPending = nil
+	go func() {
+		gsb.currentMu.Lock()
+		defer gsb.currentMu.Unlock()
+		cur.Close()
+	}()
+}
+
+// balancerCurrentOrPending reports whether the balancer passed in is either
+// the current or the pending balancer. The caller must hold gsb.mu.
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
+	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
+}
+
+// SwitchTo initializes the graceful switch process, which completes based on
+// connectivity state changes on the current/pending balancer. Thus, the switch
+// process is not complete when this method returns. This method must be called
+// synchronously alongside the rest of the balancer.Balancer methods this
+// Graceful Switch Balancer implements.
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+	gsb.mu.Lock()
+	if gsb.closed {
+		gsb.mu.Unlock()
+		return errBalancerClosed
+	}
+	bw := &balancerWrapper{
+		gsb: gsb,
+		lastState: balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+		},
+		subconns: make(map[balancer.SubConn]bool),
+	}
+	balToClose := gsb.balancerPending // nil if there is no pending balancer
+	if gsb.balancerCurrent == nil {
+		gsb.balancerCurrent = bw
+	} else {
+		gsb.balancerPending = bw
+	}
+	gsb.mu.Unlock()
+	balToClose.Close()
+	// This function takes a builder instead of a balancer because builder.Build
+	// can call back inline, and this utility needs to handle the callbacks.
+	newBalancer := builder.Build(bw, gsb.bOpts)
+	if newBalancer == nil {
+		// This is illegal and should never happen; if it does happen, we clear
+		// the balancerWrapper we were constructing to avoid a potential panic.
+		gsb.mu.Lock()
+		if gsb.balancerPending != nil {
+			gsb.balancerPending = nil
+		} else {
+			gsb.balancerCurrent = nil
+		}
+		gsb.mu.Unlock()
+		return balancer.ErrBadResolverState
+	}
+
+	// This write doesn't need to take gsb.mu because this field never gets read
+	// or written to on any calls from the current or pending. Calls from grpc
+	// to this balancer are guaranteed to be called synchronously, so this
+	// bw.Balancer field will never be forwarded to until this SwitchTo()
+	// function returns.
+	bw.Balancer = newBalancer
+	return nil
+}
+
+// latestBalancer returns the pending balancer if it exists, otherwise the
+// current balancer. It returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	if gsb.balancerPending != nil {
+		return gsb.balancerPending
+	}
+	return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return errBalancerClosed
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, its SubConns are
+// reconnected manually instead.
+func (gsb *Balancer) ExitIdle() {
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as the write to the
+	// Balancer field happens in SwitchTo, which completes before this can be
+	// called.
+	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
+		ei.ExitIdle()
+		return
+	}
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	for sc := range balToUpdate.subconns {
+		sc.Connect()
+	}
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	gsb.currentMu.Lock()
+	defer gsb.currentMu.Unlock()
+	gsb.mu.Lock()
+	// Forward update to the appropriate child.  Even if there is a pending
+	// balancer, the current balancer should continue to get SubConn updates to
+	// maintain the proper state while the pending is still connecting.
+	var balToUpdate *balancerWrapper
+	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+		balToUpdate = gsb.balancerCurrent
+	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+		balToUpdate = gsb.balancerPending
+	}
+	gsb.mu.Unlock()
+	if balToUpdate == nil {
+		// SubConn belonged to a stale lb policy that has not yet fully closed,
+		// or the balancer was already closed.
+		return
+	}
+	balToUpdate.UpdateSubConnState(sc, state)
+}
+
+// Close closes any active child balancers.
+func (gsb *Balancer) Close() {
+	gsb.mu.Lock()
+	gsb.closed = true
+	currentBalancerToClose := gsb.balancerCurrent
+	gsb.balancerCurrent = nil
+	pendingBalancerToClose := gsb.balancerPending
+	gsb.balancerPending = nil
+	gsb.mu.Unlock()
+
+	currentBalancerToClose.Close()
+	pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/remove
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+	balancer.Balancer
+	gsb *Balancer
+
+	lastState balancer.State
+	subconns  map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	if state.ConnectivityState == connectivity.Shutdown {
+		bw.gsb.mu.Lock()
+		delete(bw.subconns, sc)
+		bw.gsb.mu.Unlock()
+	}
+	// There is no need to protect this read with a mutex, as the write to the
+	// Balancer field happens in SwitchTo, which completes before this can be
+	// called.
+	bw.Balancer.UpdateSubConnState(sc, state)
+}
+
+// Close closes the underlying LB policy and removes the subconns it created. bw
+// must not be referenced via balancerCurrent or balancerPending in gsb when
+// called. gsb.mu must not be held.  Does not panic with a nil receiver.
+func (bw *balancerWrapper) Close() {
+	// bw may be nil here (e.g. when there was no pending balancer to close);
+	// Close is then a no-op.
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() is
+	// impossible to be called concurrently with the write in SwitchTo(). The
+	// callsites of Close() for this balancer in Graceful Switch Balancer will
+	// never be called until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		bw.gsb.cc.RemoveSubConn(sc)
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// If the current balancer exits READY and there is a pending balancer, we
+		// can forward the pending balancer's cached State up to the ClientConn and
+		// swap the pending into the current. This is because there is no reason to
+		// gracefully switch from and keep using the old policy, as the ClientConn
+		// is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the ClientConn. Ignoring
+		// state + picker from the current balancer would cause the system to behave
+		// incorrectly from the current LB policy's perspective. Also, the current LB
+		// is still being used by grpc to choose SubConns per RPC, and thus should
+		// use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		bw.gsb.cc.RemoveSubConn(sc)
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
+	if bw != bw.gsb.latestBalancer() {
+		return
+	}
+	bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.RemoveSubConn(sc)
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
+
+func (bw *balancerWrapper) Target() string {
+	return bw.gsb.cc.Target()
+}
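
The two swap sites in UpdateState above reduce to a single predicate; this restatement is purely illustrative and not part of the package:

// shouldSwap reports whether a pending balancer should replace the current
// one: either the pending balancer has left CONNECTING (it has a usable
// picker to offer), or the current balancer has left READY (there is
// nothing worth preserving).
func shouldSwap(pending, current connectivity.State) bool {
	return pending != connectivity.Connecting || current != connectivity.Ready
}
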
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 000000000..3a905d966
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) interface{}
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
+func Parse(md metadata.MD) interface{} {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
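
A hypothetical Parser implementation for illustration; the trailer key "x-server-load" is invented here, and SetParser must run before any gRPC activity, as noted above:

// trailerLoadParser extracts a load report from a hypothetical trailer key.
type trailerLoadParser struct{}

func (trailerLoadParser) Parse(md metadata.MD) interface{} {
	if vals := md.Get("x-server-load"); len(vals) > 0 {
		return vals[0]
	}
	return nil
}

// Somewhere in process setup, before any gRPC activity:
//	balancerload.SetParser(trailerLoadParser{})
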
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 000000000..809d73cca
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,189 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+// Logger is the global binary logger. It can be used to get binary logger for
+// each method.
+type Logger interface {
+	GetMethodLogger(methodName string) MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a MethodLogger for each individual method.
+var binLogger Logger
+
+var grpclogLogger = grpclog.Component("binarylog")
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+	return binLogger
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance, so that
+// sequence IDs can be generated within each call.
+func GetMethodLogger(methodName string) MethodLogger {
+	if binLogger == nil {
+		return nil
+	}
+	return binLogger.GetMethodLogger(methodName)
+}
+
+func init() {
+	const envStr = "GRPC_BINARY_LOG_FILTER"
+	configStr := os.Getenv(envStr)
+	binLogger = NewLoggerFromConfigString(configStr)
+}
+
+// MethodLoggerConfig contains the settings for the logging behavior of a
+// method logger. Currently, it contains the max lengths of headers and messages.
+type MethodLoggerConfig struct {
+	// Max length of header and message.
+	Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+	All      *MethodLoggerConfig
+	Services map[string]*MethodLoggerConfig
+	Methods  map[string]*MethodLoggerConfig
+
+	Blacklist map[string]struct{}
+}
+
+type logger struct {
+	config LoggerConfig
+}
+
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+	return &logger{config: config}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+	return &logger{}
+}
+
+// Set method logger for "*".
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+	if l.config.All != nil {
+		return fmt.Errorf("conflicting global rules found")
+	}
+	l.config.All = ml
+	return nil
+}
+
+// Set method logger for "service/*".
+//
+// New MethodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
+		return fmt.Errorf("conflicting service rules for service %v found", service)
+	}
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// New MethodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
+	}
+	l.config.Blacklist[method] = struct{}{}
+	return nil
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance, so that
+// sequence IDs can be generated within each call.
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+	s, m, err := grpcutil.ParseMethod(methodName)
+	if err != nil {
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		return nil
+	}
+	if ml, ok := l.config.Methods[s+"/"+m]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
+	}
+	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
+		return nil
+	}
+	if ml, ok := l.config.Services[s]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
+	}
+	if l.config.All == nil {
+		return nil
+	}
+	return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
+}
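
The lookup in GetMethodLogger gives the most specific rule priority: an exact method rule wins over the blacklist, which wins over a service wildcard, which wins over the "*" default. A condensed, illustrative restatement using the same types (not part of the package):

func pickRule(cfg LoggerConfig, service, method string) *MethodLoggerConfig {
	if ml, ok := cfg.Methods[service+"/"+method]; ok {
		return ml // exact "service/method" rule wins
	}
	if _, ok := cfg.Blacklist[service+"/"+method]; ok {
		return nil // blacklisted: no logging
	}
	if ml, ok := cfg.Services[service]; ok {
		return ml // "service/*" rule
	}
	return cfg.All // may be nil: nothing configured
}
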
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 000000000..1ee00a39a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains variables/functions that are exported for testing
+// only.
+//
+// Ideally these would live in a *_test.go file inside the binarylog
+// package, but that doesn't work with staticcheck under Go modules. The
+// error was: "MdToMetadataProto not declared by package binarylog". This
+// could be caused by the way staticcheck looks for files for a certain
+// package, which doesn't support *_test.go files.
+//
+// Move these to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
+	// for testing only.
+	AllLogger = NewLoggerFromConfigString("*")
+	// MdToMetadataProto converts metadata to a binary logging proto message.
+	// It's for testing only.
+	MdToMetadataProto = mdToMetadataProto
+	// AddrToProto converts an address to a binary logging proto message. It's
+	// for testing only.
+	AddrToProto = addrToProto
+)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 000000000..f9e80e27a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger. It can be used
+// to build a new logger and assign it to binarylog.Logger.
+//
+// Example filter config strings:
+//   - "" Nothing will be logged
+//   - "*" All headers and messages will be fully logged.
+//   - "*{h}" Only headers will be logged.
+//   - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//   - "Foo/*" Logs every method in service Foo
+//   - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//   - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//     /Foo/Bar, logs all headers and messages in every other method in service
+//     Foo.
+//
+// If two configs exist for one certain method or service, the one specified
+// later overrides the previous config.
+func NewLoggerFromConfigString(s string) Logger {
+	if s == "" {
+		return nil
+	}
+	l := newEmptyLogger()
+	methods := strings.Split(s, ",")
+	for _, method := range methods {
+		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+			grpclogLogger.Warningf("failed to parse binary log config: %v", err)
+			return nil
+		}
+	}
+	return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates a
+// TruncatingMethodLogger, and adds it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+	// "" is invalid.
+	if config == "" {
+		return errors.New("empty string is not a valid method binary logging config")
+	}
+
+	// "-service/method", blacklist, no * or {} allowed.
+	if config[0] == '-' {
+		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if m == "*" {
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
+		}
+		if suffix != "" {
+			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
+		}
+		if err := l.setBlacklist(s + "/" + m); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	// "*{h:256;m:256}"
+	if config[0] == '*' {
+		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	s, m, suffix, err := parseMethodConfigAndSuffix(config)
+	if err != nil {
+		return fmt.Errorf("invalid config: %q, %v", config, err)
+	}
+	hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
+	if err != nil {
+		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
+	}
+	if m == "*" {
+		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	} else {
+		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	}
+	return nil
+}
+
+const (
+	// TODO: this const is only used by env_config now. But could be useful for
+	// other config. Move to binarylog.go if necessary.
+	maxUInt = ^uint64(0)
+
+	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
+	// expected output.
+	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
+
+	// For suffix from above, "{h:123,m:123}". See test for expected output.
+	optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123".
+	headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$`
+	messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$`
+	headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
+)
+
+var (
+	longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr)
+	headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr)
+	messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr)
+	headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
+)
+
+// Turn "service/method{h;m}" into "service", "method", "{h;m}".
+func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
+	// Regexp result:
+	//
+	// in:  "p.s/m{h:123,m:123}",
+	// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
+	match := longMethodConfigRegexp.FindStringSubmatch(c)
+	if match == nil {
+		return "", "", "", fmt.Errorf("%q contains invalid substring", c)
+	}
+	service = match[1]
+	method = match[2]
+	suffix = match[3]
+	return
+}
+
+// Turn "{h:123;m:345}" into 123, 345.
+//
+// Return maxUInt if length is unspecified.
+func parseHeaderMessageLengthConfig(c string) (hdrLen, msgLen uint64, err error) {
+	if c == "" {
+		return maxUInt, maxUInt, nil
+	}
+	// Header config only.
+	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
+		if s := match[1]; s != "" {
+			hdrLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+			return hdrLen, 0, nil
+		}
+		return maxUInt, 0, nil
+	}
+
+	// Message config only.
+	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
+		if s := match[1]; s != "" {
+			msgLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+			return 0, msgLen, nil
+		}
+		return 0, maxUInt, nil
+	}
+
+	// Header and message config both.
+	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
+		// Both hdr and msg are specified, but one or both of them might be empty.
+		hdrLen = maxUInt
+		msgLen = maxUInt
+		if s := match[1]; s != "" {
+			hdrLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+		}
+		if s := match[2]; s != "" {
+			msgLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+		}
+		return hdrLen, msgLen, nil
+	}
+	return 0, 0, fmt.Errorf("%q contains invalid substring", c)
+}
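
Putting the pieces together, a sketch of what a config string yields (treating this internal package as importable for the sake of illustration):

// Log all of service Foo fully, except Foo/Bar, and cap messages
// everywhere else at 256 bytes.
l := NewLoggerFromConfigString("*{m:256},Foo/*,-Foo/Bar")
if ml := l.GetMethodLogger("/Foo/Baz"); ml != nil {
	// Matched by "Foo/*": headers and messages are logged in full.
}
if ml := l.GetMethodLogger("/Foo/Bar"); ml == nil {
	// Blacklisted by "-Foo/Bar": nothing is logged.
}
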
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
new file mode 100644
index 000000000..d71e44177
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -0,0 +1,435 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+type callIDGenerator struct {
+	id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+	id := atomic.AddUint64(&g.id, 1)
+	return id
+}
+
+// reset is for testing only, and doesn't need to be thread safe.
+func (g *callIDGenerator) reset() {
+	g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+type MethodLogger interface {
+	Log(LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
+	headerMaxLen, messageMaxLen uint64
+
+	callID          uint64
+	idWithinCallGen *callIDGenerator
+
+	sink Sink // TODO(blog): make this pluggable.
+}
+
+// NewTruncatingMethodLogger returns a new truncating method logger.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+	return &TruncatingMethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: DefaultSink, // TODO(blog): make it pluggable.
+	}
+}
+
+// Build is an internal-only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much
+// logic in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
+	m := c.toProto()
+	timestamp, _ := ptypes.TimestampProto(time.Now())
+	m.Timestamp = timestamp
+	m.CallId = ml.callID
+	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+	switch pay := m.Payload.(type) {
+	case *binlogpb.GrpcLogEntry_ClientHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+	case *binlogpb.GrpcLogEntry_ServerHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+	case *binlogpb.GrpcLogEntry_Message:
+		m.PayloadTruncated = ml.truncateMessage(pay.Message)
+	}
+	return m
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
+	ml.sink.Write(ml.Build(c))
+}
+
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
+	if ml.headerMaxLen == maxUInt {
+		return false
+	}
+	var (
+		bytesLimit = ml.headerMaxLen
+		index      int
+	)
+	// At the end of the loop, index will be the first entry where the total
+	// size is greater than the limit:
+	//
+	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
+	for ; index < len(mdPb.Entry); index++ {
+		entry := mdPb.Entry[index]
+		if entry.Key == "grpc-trace-bin" {
+			// "grpc-trace-bin" is a special key. It's kept in the log entry,
+			// but not counted towards the size limit.
+			continue
+		}
+		currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
+		if currentEntryLen > bytesLimit {
+			break
+		}
+		bytesLimit -= currentEntryLen
+	}
+	truncated = index < len(mdPb.Entry)
+	mdPb.Entry = mdPb.Entry[:index]
+	return truncated
+}
+
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
+	if ml.messageMaxLen == maxUInt {
+		return false
+	}
+	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+		return false
+	}
+	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+	return true
+}
+
+// LogEntryConfig represents the configuration for binary log entry.
+type LogEntryConfig interface {
+	toProto() *binlogpb.GrpcLogEntry
+}
+
+// ClientHeader configures the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	MethodName   string
+	Authority    string
+	Timeout      time.Duration
+	// PeerAddr is required only when it's on server side.
+	PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
+	// This function doesn't need to set all the fields (e.g. seq ID). The Log
+	// function will set the fields when necessary.
+	clientHeader := &binlogpb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configures the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &binlogpb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configures the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configures the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configures the binary log entry to be a ClientHalfClose entry.
+type ClientHalfClose struct {
+	OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+		Payload: nil, // No payload here.
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerTrailer configures the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+	OnClientSide bool
+	Trailer      metadata.MD
+	// Err is the status error.
+	Err error
+	// PeerAddr is required only when it's on client side and the RPC is trailer
+	// only.
+	PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
+	st, ok := status.FromError(c.Err)
+	if !ok {
+		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
+	}
+	var (
+		detailsBytes []byte
+		err          error
+	)
+	stProto := st.Proto()
+	if stProto != nil && len(stProto.Details) != 0 {
+		detailsBytes, err = proto.Marshal(stProto)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
+		}
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &binlogpb.GrpcLogEntry_Trailer{
+			Trailer: &binlogpb.Trailer{
+				Metadata:      mdToMetadataProto(c.Trailer),
+				StatusCode:    uint32(st.Code()),
+				StatusMessage: st.Message(),
+				StatusDetails: detailsBytes,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// Cancel configures the binary log entry to be a Cancel entry.
+type Cancel struct {
+	OnClientSide bool
+}
+
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+		Payload: nil,
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+	switch key {
+	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+		return true
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+		return false
+	}
+	return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
+	ret := &binlogpb.Metadata{}
+	for k, vv := range md {
+		if metadataKeyOmit(k) {
+			continue
+		}
+		for _, v := range vv {
+			ret.Entry = append(ret.Entry,
+				&binlogpb.MetadataEntry{
+					Key:   k,
+					Value: []byte(v),
+				},
+			)
+		}
+	}
+	return ret
+}
+
+func addrToProto(addr net.Addr) *binlogpb.Address {
+	ret := &binlogpb.Address{}
+	switch a := addr.(type) {
+	case *net.TCPAddr:
+		if a.IP.To4() != nil {
+			ret.Type = binlogpb.Address_TYPE_IPV4
+		} else if a.IP.To16() != nil {
+			ret.Type = binlogpb.Address_TYPE_IPV6
+		} else {
+			ret.Type = binlogpb.Address_TYPE_UNKNOWN
+			// Do not set address and port fields.
+			break
+		}
+		ret.Address = a.IP.String()
+		ret.IpPort = uint32(a.Port)
+	case *net.UnixAddr:
+		ret.Type = binlogpb.Address_TYPE_UNIX
+		ret.Address = a.String()
+	default:
+		ret.Type = binlogpb.Address_TYPE_UNKNOWN
+	}
+	return ret
+}
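
For orientation, a sketch of how one of these entry configs would be logged from inside gRPC, assuming ml is a MethodLogger obtained from GetMethodLogger; the header values are illustrative:

ml.Log(&ClientHeader{
	OnClientSide: true,
	Header:       metadata.Pairs("user-key", "some-value"),
	MethodName:   "/Foo/Bar",
	Authority:    "example.com",
	Timeout:      5 * time.Second,
})
// Build stamps the timestamp, call ID and per-call sequence ID, truncates
// the metadata to the configured header limit, and Log hands the proto to
// the sink.
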
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 000000000..264de387c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,170 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+)
+
+var (
+	// DefaultSink is the sink to which the logs are written. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// Sink writes log entries into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
+type Sink interface {
+	// Write will be called to write the log entry into the sink.
+	//
+	// It should be thread-safe so it can be called in parallel.
+	Write(*binlogpb.GrpcLogEntry) error
+	// Close will be called when the Sink is replaced by a new Sink.
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                       { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4-byte big-endian unsigned integer as the length.
+//
+// No buffering is done; Close() doesn't try to close the writer.
+func newWriterSink(w io.Writer) Sink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+		return err
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufferedSink struct {
+	mu             sync.Mutex
+	closer         io.Closer
+	out            Sink          // out is built on buf.
+	buf            *bufio.Writer // buf is kept for flush.
+	flusherStarted bool
+
+	writeTicker *time.Ticker
+	done        chan struct{}
+}
+
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if !fs.flusherStarted {
+		// Start the write loop when Write is called.
+		fs.startFlushGoroutine()
+		fs.flusherStarted = true
+	}
+	if err := fs.out.Write(e); err != nil {
+		return err
+	}
+	return nil
+}
+
+const (
+	bufFlushDuration = 60 * time.Second
+)
+
+func (fs *bufferedSink) startFlushGoroutine() {
+	fs.writeTicker = time.NewTicker(bufFlushDuration)
+	go func() {
+		for {
+			select {
+			case <-fs.done:
+				return
+			case <-fs.writeTicker.C:
+			}
+			fs.mu.Lock()
+			if err := fs.buf.Flush(); err != nil {
+				grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+			}
+			fs.mu.Unlock()
+		}
+	}()
+}
+
+func (fs *bufferedSink) Close() error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.writeTicker != nil {
+		fs.writeTicker.Stop()
+	}
+	close(fs.done)
+	if err := fs.buf.Flush(); err != nil {
+		grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+	}
+	if err := fs.closer.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+	}
+	if err := fs.out.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the Sink: %v", err)
+	}
+	return nil
+}
+
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4-byte big-endian unsigned integer holding its
+// length.
+//
+// Content is kept in a buffer, which is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
+	bufW := bufio.NewWriter(o)
+	return &bufferedSink{
+		closer: o,
+		out:    newWriterSink(bufW),
+		buf:    bufW,
+		done:   make(chan struct{}),
+	}
+}
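+
+// A usage sketch (illustrative only; the file path is hypothetical and an os
+// import is assumed): the buffered sink is typically built on top of a file,
+// flushed every 60s by the background goroutine and flushed/closed on Close:
+//
+//	f, err := os.Create("/tmp/grpc_binarylog")
+//	if err != nil {
+//		// handle error
+//	}
+//	sink := NewBufferedSink(f)
+//	defer sink.Close() // flushes the buffer and closes f
+//	_ = sink.Write(&binlogpb.GrpcLogEntry{})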
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
new file mode 100644
index 000000000..9f6a0c120
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package buffer provides an implementation of an unbounded buffer.
+package buffer
+
+import "sync"
+
+// Unbounded is an implementation of an unbounded buffer which does not use
+// extra goroutines. This is typically used for passing updates from one entity
+// to another within gRPC.
+//
+// All methods on this type are thread-safe and don't block on anything except
+// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `interface{}`. This means that a call to Put() incurs an extra memory
+// allocation, and also that users need a type assertion while reading. For
+// performance-critical code paths, using Unbounded is strongly discouraged;
+// defining a new type-specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
+type Unbounded struct {
+	c       chan interface{}
+	mu      sync.Mutex
+	backlog []interface{}
+}
+
+// NewUnbounded returns a new instance of Unbounded.
+func NewUnbounded() *Unbounded {
+	return &Unbounded{c: make(chan interface{}, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t interface{}) {
+	b.mu.Lock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+	b.mu.Unlock()
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+func (b *Unbounded) Get() <-chan interface{} {
+	return b.c
+}
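+
+// A minimal usage sketch (illustrative only, from a consumer's perspective):
+// each read from Get() is paired with a call to Load() so the next backlogged
+// value is moved onto the channel:
+//
+//	b := buffer.NewUnbounded()
+//	b.Put(1)
+//	b.Put(2) // goes to the backlog; the channel already holds 1
+//	v := <-b.Get()
+//	b.Load()    // moves 2 from the backlog onto the channel
+//	_ = v.(int) // values are interface{}, so a type assertion is needed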
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 000000000..777cbcd79
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,789 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage  = int64(50)
+	curState      int32
+	maxTraceEntry = defaultMaxTraceEntry
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		db.set(newChannelMap())
+		idGen.reset()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e.
+// channel/subchannel). Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to the default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// dbWrapper wraps a reference to the internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorageForTesting initializes channelz data storage and id
+// generator for testing purposes.
+//
+// Returns a cleanup function to be invoked by the test, which waits for up to
+// 10s for all channelz state to be reset by the grpc goroutines when those
+// entities get closed. This cleanup function helps ensure that tests don't
+// interfere with each other.
+func NewChannelzStorageForTesting() (cleanup func() error) {
+	db.set(newChannelMap())
+	idGen.reset()
+
+	return func() error {
+		cm := db.get()
+		if cm == nil {
+			return nil
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		ticker := time.NewTicker(10 * time.Millisecond)
+		defer ticker.Stop()
+		for {
+			cm.mu.RLock()
+			topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
+			cm.mu.RUnlock()
+
+			if err := ctx.Err(); err != nil {
+				return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
+			}
+			if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
+				return nil
+			}
+			<-ticker.C
+		}
+	}
+}
+
+// GetTopChannels returns a slice of top-level channels' ChannelMetrics, along
+// with a boolean indicating whether there are more top-level channels to be
+// queried for.
+//
+// The arg id specifies that only channels with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults, or EntryPerPage if maxResults is zero, and is sorted in
+// ascending id order.
+func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id, maxResults)
+}
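+
+// A pagination sketch (illustrative only): walking all top-level channels one
+// page at a time, resuming from the last returned id:
+//
+//	var id int64
+//	for {
+//		metrics, end := channelz.GetTopChannels(id, 0) // 0 => EntryPerPage
+//		for _, m := range metrics {
+//			_ = m // inspect the ChannelMetric
+//		}
+//		if end || len(metrics) == 0 {
+//			break
+//		}
+//		id = metrics[len(metrics)-1].ID + 1
+//	}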
+
+// GetServers returns a slice of servers' ServerMetrics, along with a boolean
+// indicating whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults, or EntryPerPage if maxResults is zero, and is sorted in
+// ascending id order.
+func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id, maxResults)
+}
+
+// GetServerSockets returns a slice of the server's (identified by id) normal
+// sockets' SocketMetrics, along with a boolean indicating whether there are
+// more sockets to be queried for.
+//
+// The arg startID specifies that only sockets with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults, or EntryPerPage if maxResults is zero, and is sorted in
+// ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID, maxResults)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+	return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+	return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketInternalMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+	return db.get().GetSocket(id)
+}
+
+// GetServer returns the ServerMetric for the server (identified by id).
+func GetServer(id int64) *ServerMetric {
+	return db.get().GetServer(id)
+}
+
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
+	id := idGen.genID()
+	var parent int64
+	isTopChannel := true
+	if pid != nil {
+		isTopChannel = false
+		parent = pid.Int()
+	}
+
+	if !IsOn() {
+		return newIdentifer(RefChannel, id, pid)
+	}
+
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         parent,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addChannel(id, cn, isTopChannel, parent)
+	return newIdentifer(RefChannel, id, pid)
+}
+
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a SubChannel's parent id cannot be nil")
+	}
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefSubChannel, id, pid), nil
+	}
+
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid.Int(),
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addSubChannel(id, sc, pid.Int())
+	return newIdentifer(RefSubChannel, id, pid), nil
+}
+
+// RegisterServer registers the given server s in channelz database. It returns
+// the unique channelz tracking id assigned to this server.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefServer, id, nil)
+	}
+
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return newIdentifer(RefServer, id, nil)
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this listen socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a ListenSocket's parent id cannot be 0")
+	}
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefListenSocket, id, pid), nil
+	}
+
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addListenSocket(id, ls, pid.Int())
+	return newIdentifer(RefListenSocket, id, pid), nil
+}
+
+// RegisterNormalSocket registers the given normal socket s in channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this normal socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a NormalSocket's parent id cannot be 0")
+	}
+	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefNormalSocket, id, pid), nil
+	}
+
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addNormalSocket(id, ns, pid.Int())
+	return newIdentifer(RefNormalSocket, id, pid), nil
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id
+// from the channelz database.
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+	if !IsOn() {
+		return
+	}
+	db.get().removeEntry(id.Int())
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event to the entity with the specified id, using
+// the provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+	// Log only the trace description associated with the bottommost entity.
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+	case CtWarning:
+		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+	case CtError:
+		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
+	}
+
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	if IsOn() {
+		db.get().traceEvent(id.Int(), desc)
+	}
+}
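+
+// An illustrative call (names like logger and scID are hypothetical): record
+// an event on a subchannel and mirror it into the parent channel's trace via
+// the Parent field:
+//
+//	channelz.AddTraceEvent(logger, scID, 0, &channelz.TraceEventDesc{
+//		Desc:     "Subchannel created",
+//		Severity: channelz.CtInfo,
+//		Parent: &channelz.TraceEventDesc{
+//			Desc:     fmt.Sprintf("Subchannel(id:%d) created", scID.Int()),
+//			Severity: channelz.CtInfo,
+//		},
+//	})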
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking:
+//  1. Methods that acquire the global lock.
+//  2. Methods that can only be called while the global lock is held.
+//
+// A method of the second type must always be called from within a method of
+// the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	}
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
+	c.mu.Lock()
+	cn.cm = c
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
+	c.mu.Lock()
+	sc.cm = c
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete
+// the entry if it has to wait for the deletion of its children, or until no
+// other entity's channel trace references it. It may lead to a chain of entry
+// deletions. For example, deleting the last socket of a gracefully shutting
+// down server will also lead to the server being deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	c.findEntry(id).triggerDelete()
+	c.mu.Unlock()
+}
+
+// c.mu must be held by the caller
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	var v entry
+	var ok bool
+	if v, ok = c.channels[id]; ok {
+		return v
+	}
+	if v, ok = c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok = c.servers[id]; ok {
+		return v
+	}
+	if v, ok = c.listenSockets[id]; ok {
+		return v
+	}
+	if v, ok = c.normalSockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+//
+// deleteEntry simply deletes an entry from the channelMap. Before calling this
+// method, the caller must check that the entry is ready to be deleted, i.e.
+// removeEntry() has been called on it and no children remain.
+//
+// The conditionals are ordered by the expected frequency of deletion of each
+// entity type, in order to optimize performance.
+func (c *channelMap) deleteEntry(id int64) {
+	var ok bool
+	if _, ok = c.normalSockets[id]; ok {
+		delete(c.normalSockets, id)
+		return
+	}
+	if _, ok = c.subChannels[id]; ok {
+		delete(c.subChannels, id)
+		return
+	}
+	if _, ok = c.channels[id]; ok {
+		delete(c.channels, id)
+		delete(c.topLevelChannels, id)
+		return
+	}
+	if _, ok = c.listenSockets[id]; ok {
+		delete(c.listenSockets, id)
+		return
+	}
+	if _, ok = c.servers[id]; ok {
+		delete(c.servers, id)
+		return
+	}
+}
+
+func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
+	c.mu.Lock()
+	child := c.findEntry(id)
+	childTC, ok := child.(tracedChannel)
+	if !ok {
+		c.mu.Unlock()
+		return
+	}
+	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+	if desc.Parent != nil {
+		parent := c.findEntry(child.getParentID())
+		var chanType RefChannelType
+		switch child.(type) {
+		case *channel:
+			chanType = RefChannel
+		case *subChannel:
+			chanType = RefSubChannel
+		}
+		if parentTC, ok := parent.(tracedChannel); ok {
+			parentTC.getChannelTrace().append(&TraceEvent{
+				Desc:      desc.Parent.Desc,
+				Severity:  desc.Parent.Severity,
+				Timestamp: time.Now(),
+				RefID:     id,
+				RefName:   childTC.getRefName(),
+				RefType:   chanType,
+			})
+			childTC.incrTraceRefCount()
+		}
+	}
+	c.mu.Unlock()
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int           { return len(s) }
+func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+	n := make(map[int64]string)
+	for k, v := range m {
+		n[k] = v
+	}
+	return n
+}
+
+func min(a, b int64) int64 {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
+	c.mu.RLock()
+	l := int64(len(c.topLevelChannels))
+	ids := make([]int64, 0, l)
+	cns := make([]*channel, 0, min(l, maxResults))
+
+	for k := range c.topLevelChannels {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := int64(0)
+	var end bool
+	var t []*ChannelMetric
+	for i, v := range ids[idx:] {
+		if count == maxResults {
+			break
+		}
+		if cn, ok := c.channels[v]; ok {
+			cns = append(cns, cn)
+			t = append(t, &ChannelMetric{
+				NestedChans: copyMap(cn.nestedChans),
+				SubChans:    copyMap(cn.subChans),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, cn := range cns {
+		t[i].ChannelData = cn.c.ChannelzMetric()
+		t[i].ID = cn.id
+		t[i].RefName = cn.refName
+		t[i].Trace = cn.trace.dumpData()
+	}
+	return t, end
+}
+
+func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
+	c.mu.RLock()
+	l := int64(len(c.servers))
+	ids := make([]int64, 0, l)
+	ss := make([]*server, 0, min(l, maxResults))
+	for k := range c.servers {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := int64(0)
+	var end bool
+	var s []*ServerMetric
+	for i, v := range ids[idx:] {
+		if count == maxResults {
+			break
+		}
+		if svr, ok := c.servers[v]; ok {
+			ss = append(ss, svr)
+			s = append(s, &ServerMetric{
+				ListenSockets: copyMap(svr.listenSockets),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, svr := range ss {
+		s[i].ServerData = svr.s.ChannelzMetric()
+		s[i].ID = svr.id
+		s[i].RefName = svr.refName
+	}
+	return s, end
+}
+
+func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
+	var svr *server
+	var ok bool
+	c.mu.RLock()
+	if svr, ok = c.servers[id]; !ok {
+		// server with id doesn't exist.
+		c.mu.RUnlock()
+		return nil, true
+	}
+	svrskts := svr.sockets
+	l := int64(len(svrskts))
+	ids := make([]int64, 0, l)
+	sks := make([]*normalSocket, 0, min(l, maxResults))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	count := int64(0)
+	var end bool
+	for i, v := range ids[idx:] {
+		if count == maxResults {
+			break
+		}
+		if ns, ok := c.normalSockets[v]; ok {
+			sks = append(sks, ns)
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+	s := make([]*SocketMetric, 0, len(sks))
+	for _, ns := range sks {
+		sm := &SocketMetric{}
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		s = append(s, sm)
+	}
+	return s, end
+}
+
+func (c *channelMap) GetChannel(id int64) *ChannelMetric {
+	cm := &ChannelMetric{}
+	var cn *channel
+	var ok bool
+	c.mu.RLock()
+	if cn, ok = c.channels[id]; !ok {
+		// channel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.NestedChans = copyMap(cn.nestedChans)
+	cm.SubChans = copyMap(cn.subChans)
+	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
+	// holding the lock to prevent potential data race.
+	chanCopy := cn.c
+	c.mu.RUnlock()
+	cm.ChannelData = chanCopy.ChannelzMetric()
+	cm.ID = cn.id
+	cm.RefName = cn.refName
+	cm.Trace = cn.trace.dumpData()
+	return cm
+}
+
+func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
+	cm := &SubChannelMetric{}
+	var sc *subChannel
+	var ok bool
+	c.mu.RLock()
+	if sc, ok = c.subChannels[id]; !ok {
+		// subchannel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.Sockets = copyMap(sc.sockets)
+	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
+	// holding the lock to prevent potential data race.
+	chanCopy := sc.c
+	c.mu.RUnlock()
+	cm.ChannelData = chanCopy.ChannelzMetric()
+	cm.ID = sc.id
+	cm.RefName = sc.refName
+	cm.Trace = sc.trace.dumpData()
+	return cm
+}
+
+func (c *channelMap) GetSocket(id int64) *SocketMetric {
+	sm := &SocketMetric{}
+	c.mu.RLock()
+	if ls, ok := c.listenSockets[id]; ok {
+		c.mu.RUnlock()
+		sm.SocketData = ls.s.ChannelzMetric()
+		sm.ID = ls.id
+		sm.RefName = ls.refName
+		return sm
+	}
+	if ns, ok := c.normalSockets[id]; ok {
+		c.mu.RUnlock()
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		return sm
+	}
+	c.mu.RUnlock()
+	return nil
+}
+
+func (c *channelMap) GetServer(id int64) *ServerMetric {
+	sm := &ServerMetric{}
+	var svr *server
+	var ok bool
+	c.mu.RLock()
+	if svr, ok = c.servers[id]; !ok {
+		c.mu.RUnlock()
+		return nil
+	}
+	sm.ListenSockets = copyMap(svr.listenSockets)
+	c.mu.RUnlock()
+	sm.ID = svr.id
+	sm.RefName = svr.refName
+	sm.ServerData = svr.s.ChannelzMetric()
+	return sm
+}
+
+type idGenerator struct {
+	id int64
+}
+
+func (i *idGenerator) reset() {
+	atomic.StoreInt64(&i.id, 0)
+}
+
+func (i *idGenerator) genID() int64 {
+	return atomic.AddInt64(&i.id, 1)
+}
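+
+// A lifecycle sketch (illustrative only; myServer is a hypothetical Server
+// implementation): turn collection on, register, query, and clean up:
+//
+//	channelz.TurnOn()
+//	id := channelz.RegisterServer(myServer, "myServer")
+//	defer channelz.RemoveEntry(id)
+//	metrics, end := channelz.GetServers(0, 0) // first page of ServerMetrics
+//	_, _ = metrics, end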
diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go
new file mode 100644
index 000000000..c9a27acd3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/id.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import "fmt"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier struct {
+	typ RefChannelType
+	id  int64
+	str string
+	pid *Identifier
+}
+
+// Type returns the entity type corresponding to id.
+func (id *Identifier) Type() RefChannelType {
+	return id.typ
+}
+
+// Int returns the integer identifier corresponding to id.
+func (id *Identifier) Int() int64 {
+	return id.id
+}
+
+// String returns a string representation of the entity corresponding to id.
+//
+// This includes some information about the parent as well. Examples:
+// Top-level channel: [Channel #channel-number]
+// Nested channel:    [Channel #parent-channel-number Channel #channel-number]
+// Sub channel:       [Channel #parent-channel SubChannel #subchannel-number]
+func (id *Identifier) String() string {
+	return id.str
+}
+
+// Equal returns true if other is the same as id.
+func (id *Identifier) Equal(other *Identifier) bool {
+	if (id != nil) != (other != nil) {
+		return false
+	}
+	if id == nil && other == nil {
+		return true
+	}
+	return id.typ == other.typ && id.id == other.id && id.pid == other.pid
+}
+
+// NewIdentifierForTesting returns a new opaque identifier to be used only for
+// testing purposes.
+func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
+	return newIdentifer(typ, id, pid)
+}
+
+func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
+	str := fmt.Sprintf("%s #%d", typ, id)
+	if pid != nil {
+		str = fmt.Sprintf("%s %s", pid, str)
+	}
+	return &Identifier{typ: typ, id: id, str: str, pid: pid}
+}
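+
+// A sketch of the resulting string composition (illustrative only):
+//
+//	parent := NewIdentifierForTesting(RefChannel, 1, nil)
+//	child := NewIdentifierForTesting(RefSubChannel, 2, parent)
+//	_ = parent.String() // "Channel #1"
+//	_ = child.String()  // "Channel #1 SubChannel #2"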
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
new file mode 100644
index 000000000..8e13a3d2c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("channelz")
+
+func withParens(id *Identifier) string {
+	return "[" + id.String() + "] "
+}
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtInfo,
+	})
+}
+
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtInfo,
+	})
+}
+
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtWarning,
+	})
+}
+
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtWarning,
+	})
+}
+
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtError,
+	})
+}
+
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtError,
+	})
+}
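+
+// An illustrative call site (logger, id, and addr are hypothetical): log at
+// the caller's depth and add a trace event in one step:
+//
+//	channelz.Infof(logger, id, "subchannel picked new address %q", addr)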
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 000000000..7b2f350e2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,722 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However,
+	// if the child list is not empty, then deletion from the database is on
+	// hold until the last child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether the child list is now empty. If both conditions are met, it
+	// deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means
+	// no parent.
+	getParentID() int64
+}
+
+// dummyEntry is a fake entry to handle entry not found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race
+	// condition. For example, there could be a race between ClientConn.Close()
+	// info being propagated to addrConn and http2Client. ClientConn.Close()
+	// cancels the context, causing http2Client to error out. The error info is
+	// then caught by the transport monitor before addrConn.tearDown() is
+	// called inside ClientConn.Close(). Therefore, the addrConn will create a
+	// new transport. And when registering the new transport in channelz, its
+	// parent addrConn could have already been torn down and deleted from
+	// channelz tracking, and thus we reach the code here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under a race
+	// condition. Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metric reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a channel to have
+	// sockets directly; therefore, this field is unused.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metric reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have
+	// nested channels as children; therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have
+	// subchannels as children; therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// ChannelInternalMetric defines the struct that the implementor of Channel interface
+// should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to. May be absent.
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that have ever been traced (i.e.
+	// including those that have since been deleted).
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to maxTraceEntry; newer
+	// events overwrite the oldest ones).
+	Events []*TraceEvent
+}
+
+// TraceEvent represents a single trace event.
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
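+
+// A minimal hedged sketch of an implementor (illustrative only; assumes a
+// sync/atomic import):
+//
+//	type myChannel struct{ callsStarted int64 }
+//
+//	func (m *myChannel) ChannelzMetric() *ChannelInternalMetric {
+//		return &ChannelInternalMetric{
+//			State:        connectivity.Ready,
+//			CallsStarted: atomic.LoadInt64(&m.callsStarted),
+//		}
+//	}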
+
+type dummyChannel struct{}
+
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+	return &ChannelInternalMetric{}
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+	trace       *channelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+}
+
+func (c *channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *subChannel:
+		c.subChans[id] = v.refName
+	case *channel:
+		c.nestedChans[id] = v.refName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) getParentID() int64 {
+	return c.pid
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry
+// relation tree, which means deleting the channel reference from its parent's
+// child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two
+// criteria: removal of the corresponding grpc object has been invoked, and the
+// channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been
+// successfully deleted from the tree.
+func (c *channel) deleteSelfFromTree() (deleted bool) {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return false
+	}
+	// not top channel
+	if c.pid != 0 {
+		c.cm.findEntry(c.pid).deleteChild(c.id)
+	}
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order for it to be
+// deleted from the map. This is specified in the channel tracing gRFC: as long
+// as some other trace has a reference to an entity, the trace of the
+// referenced entity must not be deleted. In order to release the resources
+// allocated by grpc, the reference to the grpc object is reset to a dummy
+// object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from
+// the map.
+func (c *channel) deleteSelfFromMap() (delete bool) {
+	if c.getTraceRefCount() != 0 {
+		c.c = &dummyChannel{}
+		return false
+	}
+	return true
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//     parent's child list.
+//  2. delete the channel from the map, i.e. delete the channel entirely from
+//     channelz. Lookup by id will return an entry-not-found error.
+func (c *channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	c.cm.deleteEntry(c.id)
+	c.trace.clear()
+}
+
+func (c *channel) getChannelTrace() *channelTrace {
+	return c.trace
+}
+
+func (c *channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *channel) getRefName() string {
+	return c.refName
+}
+
+type subChannel struct {
+	refName       string
+	c             Channel
+	closeCalled   bool
+	sockets       map[int64]string
+	id            int64
+	pid           int64
+	cm            *channelMap
+	trace         *channelTrace
+	traceRefCount int32
+}
+
+func (sc *subChannel) addChild(id int64, e entry) {
+	if v, ok := e.(*normalSocket); ok {
+		sc.sockets[id] = v.refName
+	} else {
+		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+	}
+}
+
+func (sc *subChannel) deleteChild(id int64) {
+	delete(sc.sockets, id)
+	sc.deleteSelfIfReady()
+}
+
+func (sc *subChannel) triggerDelete() {
+	sc.closeCalled = true
+	sc.deleteSelfIfReady()
+}
+
+func (sc *subChannel) getParentID() int64 {
+	return sc.pid
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet two
+// criteria: removal of the corresponding grpc object has been invoked, and the
+// subchannel does not have any children left.
+//
+// The returned boolean value indicates whether the subchannel has been
+// successfully deleted from the tree.
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
+	if !sc.closeCalled || len(sc.sockets) != 0 {
+		return false
+	}
+	sc.cm.findEntry(sc.pid).deleteChild(sc.id)
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 in order for it to be
+// deleted from the map. This is specified in the channel tracing gRFC: as long
+// as some other trace has a reference to an entity, the trace of the
+// referenced entity must not be deleted. In order to release the resources
+// allocated by grpc, the reference to the grpc object is reset to a dummy
+// object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the subchannel can be safely deleted
+// from the map.
+func (sc *subChannel) deleteSelfFromMap() (delete bool) {
+	if sc.getTraceRefCount() != 0 {
+		// free the grpc struct (i.e. addrConn)
+		sc.c = &dummyChannel{}
+		return false
+	}
+	return true
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+//     its parent's child list.
+//  2. delete the subchannel from the map, i.e. delete the subchannel entirely
+//     from channelz. Lookup by id will return an entry-not-found error.
+func (sc *subChannel) deleteSelfIfReady() {
+	if !sc.deleteSelfFromTree() {
+		return
+	}
+	if !sc.deleteSelfFromMap() {
+		return
+	}
+	sc.cm.deleteEntry(sc.id)
+	sc.trace.clear()
+}
+
+func (sc *subChannel) getChannelTrace() *channelTrace {
+	return sc.trace
+}
+
+func (sc *subChannel) incrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *subChannel) decrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *subChannel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&sc.traceRefCount)
+	return int(i)
+}
+
+func (sc *subChannel) getRefName() string {
+	return sc.refName
+}
+
+// SocketMetric defines the info channelz provides for a specific Socket, which
+// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
+type SocketMetric struct {
+	// ID is the channelz id of this socket.
+	ID int64
+	// RefName is the human readable reference string of this socket.
+	RefName string
+	// SocketData contains socket internal metric reported by the socket through
+	// ChannelzMetric().
+	SocketData *SocketInternalMetric
+}
+
+// SocketInternalMetric defines the struct that the implementor of Socket interface
+// should return from ChannelzMetric().
+type SocketInternalMetric struct {
+	// The number of streams that have been started.
+	StreamsStarted int64
+	// The number of streams that have ended successfully:
+	// On client side, receiving frame with eos bit set.
+	// On server side, sending frame with eos bit set.
+	StreamsSucceeded int64
+	// The number of streams that have ended unsuccessfully:
+	// On client side, termination without receiving frame with eos bit set.
+	// On server side, termination without sending frame with eos bit set.
+	StreamsFailed int64
+	// The number of messages successfully sent on this socket.
+	MessagesSent     int64
+	MessagesReceived int64
+	// The number of keep alives sent.  This is typically implemented with HTTP/2
+	// ping messages.
+	KeepAlivesSent int64
+	// The last time a stream was created by this endpoint.  Usually unset for
+	// servers.
+	LastLocalStreamCreatedTimestamp time.Time
+	// The last time a stream was created by the remote endpoint.  Usually unset
+	// for clients.
+	LastRemoteStreamCreatedTimestamp time.Time
+	// The last time a message was sent by this endpoint.
+	LastMessageSentTimestamp time.Time
+	// The last time a message was received by this endpoint.
+	LastMessageReceivedTimestamp time.Time
+	// The amount of window granted to the local endpoint by the remote endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	LocalFlowControlWindow int64
+	// The amount of window granted to the remote endpoint by the local endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	RemoteFlowControlWindow int64
+	// The locally bound address.
+	LocalAddr net.Addr
+	// The remote bound address.  May be absent.
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different than
+	// the original target name.
+	RemoteName    string
+	SocketOptions *SocketOptionData
+	Security      credentials.ChannelzSecurityValue
+}
+
+// Socket is the interface that should be satisfied in order to be tracked by
+// channelz as Socket.
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *listenSocket) getParentID() int64 {
+	return ls.pid
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+func (ns *normalSocket) getParentID() int64 {
+	return ns.pid
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface {
+	ChannelzMetric() *ServerInternalMetric
+}
+
+type server struct {
+	refName       string
+	s             Server
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	id            int64
+	cm            *channelMap
+}
+
+func (s *server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *normalSocket:
+		s.sockets[id] = v.refName
+	case *listenSocket:
+		s.listenSockets[id] = v.refName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.id)
+}
+
+func (s *server) getParentID() int64 {
+	return 0
+}
+
+type tracedChannel interface {
+	getChannelTrace() *channelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+type channelTrace struct {
+	cm          *channelMap
+	createdTime time.Time
+	eventCount  int64
+	mu          sync.Mutex
+	events      []*TraceEvent
+}
+
+func (c *channelTrace) append(e *TraceEvent) {
+	c.mu.Lock()
+	if len(c.events) == getMaxTraceEntry() {
+		del := c.events[0]
+		c.events = c.events[1:]
+		if del.RefID != 0 {
+			// Start recursive cleanup in a goroutine so as not to block the
+			// call originating from grpc.
+			go func() {
+				// c.cm.mu must be held when calling decrTraceRefCount.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.events = append(c.events, e)
+	c.eventCount++
+	c.mu.Unlock()
+}
+
+func (c *channelTrace) clear() {
+	c.mu.Lock()
+	for _, e := range c.events {
+		if e.RefID != 0 {
+			// The caller should already hold the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUnknown indicates unknown severity of a trace event.
+	CtUnknown Severity = iota
+	// CtInfo indicates info level severity of a trace event.
+	CtInfo
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+	// RefUnknown indicates an unknown entity type, the zero value for this type.
+	RefUnknown RefChannelType = iota
+	// RefChannel indicates the referenced entity is a Channel.
+	RefChannel
+	// RefSubChannel indicates the referenced entity is a SubChannel.
+	RefSubChannel
+	// RefServer indicates the referenced entity is a Server.
+	RefServer
+	// RefListenSocket indicates the referenced entity is a ListenSocket.
+	RefListenSocket
+	// RefNormalSocket indicates the referenced entity is a NormalSocket.
+	RefNormalSocket
+)
+
+var refChannelTypeToString = map[RefChannelType]string{
+	RefUnknown:      "Unknown",
+	RefChannel:      "Channel",
+	RefSubChannel:   "SubChannel",
+	RefServer:       "Server",
+	RefListenSocket: "ListenSocket",
+	RefNormalSocket: "NormalSocket",
+}
+
+func (r RefChannelType) String() string {
+	return refChannelTypeToString[r]
+}
+
+func (c *channelTrace) dumpData() *ChannelTrace {
+	c.mu.Lock()
+	ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
+	ct.Events = c.events[:len(c.events)]
+	c.mu.Unlock()
+	return ct
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
new file mode 100644
index 000000000..1b1c4cce3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// SocketOptionData defines the struct to hold socket option data, and the
+// related getter function to obtain info from the fd.
+type SocketOptionData struct {
+	Linger      *unix.Linger
+	RecvTimeout *unix.Timeval
+	SendTimeout *unix.Timeval
+	TCPInfo     *unix.TCPInfo
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
+		s.Linger = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
+		s.RecvTimeout = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
+		s.SendTimeout = v
+	}
+	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
+		s.TCPInfo = v
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
new file mode 100644
index 000000000..8b06eed1a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
@@ -0,0 +1,43 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and the
+// related getter function to obtain info from the fd.
+// Socket options are not supported on non-linux platforms.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux platforms.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 000000000..8d194e44e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+)
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
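GetSocketOption only works when the conn still exposes its file descriptor via syscall.Conn; the Getsockopt callback then runs under RawConn.Control, which pins the fd for the duration of the call. A small self-contained sketch of that assertion-plus-Control pattern (inspectFD is an illustrative name, not part of the patch):

```go
package main

import (
	"fmt"
	"net"
	"syscall"
)

// inspectFD mirrors how channelz's GetSocketOption reaches the raw fd:
// assert syscall.Conn, obtain a RawConn, and run a callback under Control.
func inspectFD(c net.Conn) {
	sc, ok := c.(syscall.Conn)
	if !ok {
		fmt.Println("conn does not expose a raw fd")
		return
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		fmt.Println("SyscallConn:", err)
		return
	}
	// The callback runs while the fd is guaranteed to remain valid.
	raw.Control(func(fd uintptr) {
		fmt.Println("raw fd:", fd)
	})
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	c, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer c.Close()
	inspectFD(c)
}
```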
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 000000000..837ddc402
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,27 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 000000000..32c9b5903
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"context"
+)
+
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
+type requestInfoKey struct{}
+
+// NewRequestInfoContext creates a context with ri.
+func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context {
+	return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
+// RequestInfoFromContext extracts the RequestInfo from ctx.
+func RequestInfoFromContext(ctx context.Context) interface{} {
+	return ctx.Value(requestInfoKey{})
+}
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) interface{} {
+	return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext creates a context with chi.
+func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context {
+	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
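Both helpers above rely on the standard unexported-struct-key idiom: because requestInfoKey is an unexported type, no other package can construct the same key, so context values cannot collide. A minimal sketch of the idiom under hypothetical names:

```go
package main

import (
	"context"
	"fmt"
)

// requestInfoKey-style pattern: an unexported empty struct type makes the
// context key unique to this package.
type requestInfoKey struct{}

type requestInfo struct{ Method string }

func newRequestInfoContext(ctx context.Context, ri requestInfo) context.Context {
	return context.WithValue(ctx, requestInfoKey{}, ri)
}

func requestInfoFromContext(ctx context.Context) (requestInfo, bool) {
	ri, ok := ctx.Value(requestInfoKey{}).(requestInfo)
	return ri, ok
}

func main() {
	ctx := newRequestInfoContext(context.Background(), requestInfo{Method: "/svc/M"})
	if ri, ok := requestInfoFromContext(ctx); ok {
		fmt.Println(ri.Method)
	}
}
```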
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 000000000..25ade6230
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+	if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+		return nil
+	}
+	return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+	if cert == nil || cert.URIs == nil {
+		return nil
+	}
+	var spiffeID *url.URL
+	for _, uri := range cert.URIs {
+		if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+			continue
+		}
+		// From this point, we assume the uri is intended for a SPIFFE ID.
+		if len(uri.String()) > 2048 {
+			logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+			return nil
+		}
+		if len(uri.Host) == 0 || len(uri.Path) == 0 {
+			logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+			return nil
+		}
+		if len(uri.Host) > 255 {
+			logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+			return nil
+		}
+		// A valid SPIFFE certificate can only have exactly one URI SAN field.
+		if len(cert.URIs) > 1 {
+			logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+			return nil
+		}
+		spiffeID = uri
+	}
+	return spiffeID
+}
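The validation rules in SPIFFEIDFromCert can be exercised without building a certificate by parsing candidate URIs directly; the sketch below re-states the same checks against url.Parse output (validateSPIFFEID is an illustrative helper, not part of the vendored API):

```go
package main

import (
	"fmt"
	"net/url"
)

// validateSPIFFEID applies the same checks SPIFFEIDFromCert performs on a
// URI SAN: spiffe scheme, no opaque part or user info, non-empty trust
// domain and workload path, and the documented length limits.
func validateSPIFFEID(raw string) error {
	u, err := url.Parse(raw)
	if err != nil {
		return err
	}
	switch {
	case u.Scheme != "spiffe":
		return fmt.Errorf("scheme %q is not spiffe", u.Scheme)
	case u.Opaque != "" || (u.User != nil && u.User.Username() != ""):
		return fmt.Errorf("opaque part or user info not allowed")
	case len(raw) > 2048:
		return fmt.Errorf("ID longer than 2048 bytes")
	case u.Host == "" || u.Path == "":
		return fmt.Errorf("trust domain or workload ID is empty")
	case len(u.Host) > 255:
		return fmt.Errorf("trust domain longer than 255 characters")
	}
	return nil
}

func main() {
	fmt.Println(validateSPIFFEID("spiffe://example.org/workload/a")) // <nil>
	fmt.Println(validateSPIFFEID("spiffe://example.org"))            // empty workload ID
}
```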
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
new file mode 100644
index 000000000..2919632d6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"net"
+	"syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps a reference to rawConn to support syscall.Conn for
+// channelz. SyscallConn() (the method in interface syscall.Conn) is
+// explicitly implemented on this type.
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
+// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
+// help here).
+type syscallConn struct {
+	net.Conn
+	// sysConn is a type alias of syscall.Conn. It's necessary because the name
+	// `Conn` collides with `net.Conn`.
+	sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+	sysConn, ok := rawConn.(syscall.Conn)
+	if !ok {
+		return newConn
+	}
+	return &syscallConn{
+		Conn:    newConn,
+		sysConn: sysConn,
+	}
+}
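The embedded type alias is the whole trick: embedding syscall.Conn directly would clash with the embedded net.Conn's field name, while the sysConn alias gives the second embedded interface a distinct field name. A runnable sketch of the same wrapping, assuming a wrapper conn that hides the fd the way tls.Conn does:

```go
package main

import (
	"fmt"
	"net"
	"syscall"
)

type sysConn = syscall.Conn

// wrapped mirrors credentials.syscallConn: reads and writes go to the
// embedded net.Conn, while SyscallConn() is promoted from the aliased
// embedded syscall.Conn.
type wrapped struct {
	net.Conn
	sysConn
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	raw, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer raw.Close()

	// hider stands in for a wrapper like tls.Conn that hides the raw fd.
	var hider net.Conn = struct{ net.Conn }{raw}
	_, ok := hider.(syscall.Conn)
	fmt.Println("wrapper exposes fd:", ok) // false

	w := &wrapped{Conn: hider, sysConn: raw.(syscall.Conn)}
	_, ok = interface{}(w).(syscall.Conn)
	fmt.Println("wrapped exposes fd:", ok) // true
}
```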
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 000000000..f792fd22c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends "h2" to ps unless it is already present.
+func AppendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
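AppendH2ToNextProtos copies before appending, so a caller-owned NextProtos slice is never mutated, and CloneTLSConfig defers to tls.Config.Clone for the reason the doc comment gives: the config contains a sync.Once that must not be copied by value. A short usage sketch, with appendH2 re-implementing the helper for illustration:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

const alpnProtoStrH2 = "h2"

// appendH2 mirrors AppendH2ToNextProtos: advertise "h2" for ALPN unless
// the caller already does, without mutating the input slice.
func appendH2(ps []string) []string {
	for _, p := range ps {
		if p == alpnProtoStrH2 {
			return ps
		}
	}
	ret := make([]string, 0, len(ps)+1)
	ret = append(ret, ps...)
	return append(ret, alpnProtoStrH2)
}

func main() {
	cfg := &tls.Config{NextProtos: []string{"http/1.1"}}
	clone := cfg.Clone() // safe copy; a tls.Config must not be copied by value
	clone.NextProtos = appendH2(clone.NextProtos)
	fmt.Println(cfg.NextProtos, clone.NextProtos)
}
```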
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..5ba9d94d4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package envconfig contains grpc settings configured by environment variables.
+package envconfig
+
+import (
+	"os"
+	"strconv"
+	"strings"
+)
+
+var (
+	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
+	// AdvertiseCompressors is set if registered compressor should be advertised
+	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
+	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
+	// RingHashCap indicates the maximum ring size which defaults to 4096
+	// entries but may be overridden by setting the environment variable
+	// "GRPC_RING_HASH_CAP".  This does not override the default bounds
+	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
+	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
+)
+
+func boolFromEnv(envVar string, def bool) bool {
+	if def {
+		// The default is true; return true unless the variable is "false".
+		return !strings.EqualFold(os.Getenv(envVar), "false")
+	}
+	// The default is false; return false unless the variable is "true".
+	return strings.EqualFold(os.Getenv(envVar), "true")
+}
+
+func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
+	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
+	if err != nil {
+		return def
+	}
+	if v < min {
+		return min
+	}
+	if v > max {
+		return max
+	}
+	return v
+}
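Note the asymmetry in boolFromEnv: a true default stays true for any value except the literal "false", while a false default flips only on the literal "true" (both case-insensitive). A sketch that re-implements the unexported helper to show the behavior, using a hypothetical DEMO_FLAG variable:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnv semantics: a true default can only be turned off with the
// literal "false"; a false default is only turned on with "true".
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	os.Setenv("DEMO_FLAG", "FALSE")
	fmt.Println(boolFromEnv("DEMO_FLAG", true)) // false: case-insensitive match

	os.Setenv("DEMO_FLAG", "garbage")
	fmt.Println(boolFromEnv("DEMO_FLAG", true))  // true: anything but "false" keeps the default
	fmt.Println(boolFromEnv("DEMO_FLAG", false)) // false: only "true" enables
}
```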
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
new file mode 100644
index 000000000..821dd0a7c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import "os"
+
+const (
+	envObservabilityConfig     = "GRPC_GCP_OBSERVABILITY_CONFIG"
+	envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
+)
+
+var (
+	// ObservabilityConfig is the json configuration for the gcp/observability
+	// package specified directly in the envObservabilityConfig env var.
+	ObservabilityConfig = os.Getenv(envObservabilityConfig)
+	// ObservabilityConfigFile is the json configuration for the
+	// gcp/observability specified in a file with the location specified in
+	// envObservabilityConfigFile env var.
+	ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
+)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 000000000..04136882c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+	"os"
+)
+
+const (
+	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+	// Do not use this and read from env directly. Its value is read and kept in
+	// variable XDSBootstrapFileName.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+	// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+	// content. Do not use this and read from env directly. Its value is read
+	// and kept in variable XDSBootstrapFileContent.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+)
+
+var (
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+	// XDSRingHash indicates whether ring hash support is enabled, which can be
+	// disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+	XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
+	// XDSClientSideSecurity is used to control processing of security
+	// configuration on the client-side.
+	//
+	// Note that there is no env var protection for the server-side because we
+	// have a brand new API on the server-side and users explicitly need to use
+	// the new API to get security integration on the server.
+	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
+	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
+	// and DNS cluster is enabled, which can be enabled by setting the
+	// environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
+	// "true".
+	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
+
+	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+	// which can be disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+	XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
+	// XDSOutlierDetection indicates whether outlier detection support is
+	// enabled, which can be disabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
+	XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
+	// XDSFederation indicates whether federation support is enabled, which can
+	// be enabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
+	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)
+
+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS CLuster Specifier is enabled, which can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "true".
+	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
+
+	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
new file mode 100644
index 000000000..b68e26a36
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
@@ -0,0 +1,126 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog (internal) defines depth logging for grpc.
+package grpclog
+
+import (
+	"os"
+)
+
+// Logger is the logger used for the non-depth log functions.
+var Logger LoggerV2
+
+// DepthLogger is the logger used for the depth log functions.
+var DepthLogger DepthLoggerV2
+
+// InfoDepth logs to the INFO log at the specified depth.
+func InfoDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.InfoDepth(depth, args...)
+	} else {
+		Logger.Infoln(args...)
+	}
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+func WarningDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.WarningDepth(depth, args...)
+	} else {
+		Logger.Warningln(args...)
+	}
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+func ErrorDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.ErrorDepth(depth, args...)
+	} else {
+		Logger.Errorln(args...)
+	}
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+func FatalDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.FatalDepth(depth, args...)
+	} else {
+		Logger.Fatalln(args...)
+	}
+	os.Exit(1)
+}
+
+// LoggerV2 does underlying logging work for grpclog.
+// This is a copy of the LoggerV2 defined in the external grpclog package. It
+// is defined here to avoid a circular dependency.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...interface{})
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...interface{})
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...interface{})
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...interface{})
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...interface{})
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...interface{})
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...interface{})
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...interface{})
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...interface{})
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...interface{})
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
+// It is defined here to avoid a circular dependency.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	FatalDepth(depth int, args ...interface{})
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
new file mode 100644
index 000000000..82af70e96
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods called on a nil *PrefixLogger log without any prefix.
+type PrefixLogger struct {
+	logger DepthLoggerV2
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Debugf does info logging at verbose level 2.
+func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
+	if !Logger.V(2) {
+		return
+	}
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+	return &PrefixLogger{logger: logger, prefix: prefix}
+}
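The nil checks in each method are what make a nil *PrefixLogger usable: callers can hold a nil logger and still log, just without a prefix and via the package-level functions. A standalone sketch of that nil-receiver pattern (prefixLogger here is a simplified stand-in, not the internal type):

```go
package main

import "fmt"

// prefixLogger mirrors the nil-receiver trick in PrefixLogger: methods on
// a nil *prefixLogger still work and simply log without a prefix.
type prefixLogger struct{ prefix string }

func (pl *prefixLogger) Infof(format string, args ...interface{}) {
	if pl != nil {
		format = pl.prefix + format
	}
	fmt.Printf(format+"\n", args...)
}

func main() {
	var nilLogger *prefixLogger
	nilLogger.Infof("no prefix, still safe: %d", 1)

	l := &prefixLogger{prefix: "[transport] "}
	l.Infof("prefixed: %d", 2)
}
```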
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
new file mode 100644
index 000000000..517ea7064
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcrand implements math/rand functions in a concurrent-safe way
+// with a global random source, independent of math/rand's global source.
+package grpcrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
+	mu sync.Mutex
+)
+
+// Int implements rand.Int on the grpcrand global source.
+func Int() int {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Int()
+}
+
+// Int63n implements rand.Int63n on the grpcrand global source.
+func Int63n(n int64) int64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Int63n(n)
+}
+
+// Intn implements rand.Intn on the grpcrand global source.
+func Intn(n int) int {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Intn(n)
+}
+
+// Int31n implements rand.Int31n on the grpcrand global source.
+func Int31n(n int32) int32 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Int31n(n)
+}
+
+// Float64 implements rand.Float64 on the grpcrand global source.
+func Float64() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Float64()
+}
+
+// Uint64 implements rand.Uint64 on the grpcrand global source.
+func Uint64() uint64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Uint64()
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 000000000..fbe697c37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcsync implements additional synchronization primitives built upon
+// the sync package.
+package grpcsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// Event represents a one-time event that may occur in the future.
+type Event struct {
+	fired int32
+	c     chan struct{}
+	o     sync.Once
+}
+
+// Fire causes e to complete.  It is safe to call multiple times, and
+// concurrently.  It returns true iff this call to Fire caused the signaling
+// channel returned by Done to close.
+func (e *Event) Fire() bool {
+	ret := false
+	e.o.Do(func() {
+		atomic.StoreInt32(&e.fired, 1)
+		close(e.c)
+		ret = true
+	})
+	return ret
+}
+
+// Done returns a channel that will be closed when Fire is called.
+func (e *Event) Done() <-chan struct{} {
+	return e.c
+}
+
+// HasFired returns true if Fire has been called.
+func (e *Event) HasFired() bool {
+	return atomic.LoadInt32(&e.fired) == 1
+}
+
+// NewEvent returns a new, ready-to-use Event.
+func NewEvent() *Event {
+	return &Event{c: make(chan struct{})}
+}
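Event combines a sync.Once, a closed channel, and an atomic flag so that Fire is idempotent, Done supports select-based waiting, and HasFired stays cheap. Since grpcsync is internal to the grpc module, here is a re-implementation sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// event mirrors grpcsync.Event: Fire is idempotent, Done exposes a channel
// that closes exactly once, and HasFired is a cheap atomic check.
type event struct {
	fired int32
	c     chan struct{}
	o     sync.Once
}

func newEvent() *event { return &event{c: make(chan struct{})} }

func (e *event) Fire() bool {
	ret := false
	e.o.Do(func() {
		atomic.StoreInt32(&e.fired, 1)
		close(e.c)
		ret = true
	})
	return ret
}

func (e *event) Done() <-chan struct{} { return e.c }

func (e *event) HasFired() bool { return atomic.LoadInt32(&e.fired) == 1 }

func main() {
	e := newEvent()
	go e.Fire()
	<-e.Done() // wait for the one-time event
	fmt.Println("fired:", e.HasFired())    // true
	fmt.Println("fired again?", e.Fire()) // false: only the first call wins
}
```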
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
new file mode 100644
index 000000000..6635f7bca
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
@@ -0,0 +1,32 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"sync"
+)
+
+// OnceFunc returns a function wrapping f which ensures f is only executed
+// once even if the returned function is executed multiple times.
+func OnceFunc(f func()) func() {
+	var once sync.Once
+	return func() {
+		once.Do(f)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
new file mode 100644
index 000000000..9f4090967
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -0,0 +1,47 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strings"
+
+	"google.golang.org/grpc/internal/envconfig"
+)
+
+// RegisteredCompressorNames holds names of the registered compressors.
+var RegisteredCompressorNames []string
+
+// IsCompressorNameRegistered returns true when name is available in registry.
+func IsCompressorNameRegistered(name string) bool {
+	for _, compressor := range RegisteredCompressorNames {
+		if compressor == name {
+			return true
+		}
+	}
+	return false
+}
+
+// RegisteredCompressors returns a string of registered compressor names
+// separated by comma.
+func RegisteredCompressors() string {
+	if !envconfig.AdvertiseCompressors {
+		return ""
+	}
+	return strings.Join(RegisteredCompressorNames, ",")
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 000000000..b25b0baec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strconv"
+	"time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and round-up the result. Note that this is
+// equivalent to (d+r-1)/r but has less chance to overflow.
+func div(d, r time.Duration) int64 {
+	if d%r > 0 {
+		return int64(d/r + 1)
+	}
+	return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+	// TODO: This is simplistic and not bandwidth efficient. Improve it.
+	if t <= 0 {
+		return "0n"
+	}
+	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "n"
+	}
+	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "u"
+	}
+	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "m"
+	}
+	if d := div(t, time.Second); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "S"
+	}
+	if d := div(t, time.Minute); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "M"
+	}
+	// Note that maxTimeoutValue * time.Hour > MaxInt64.
+	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
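EncodeDuration always picks the smallest unit whose rounded-up value still fits in the 8-digit grpc-timeout field, so precision degrades only when it must. An abridged copy for illustration (the minute and hour branches are elided, so this sketch only covers durations that fit in the seconds unit):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

const maxTimeoutValue int64 = 100000000 - 1

// div divides and rounds up, written to avoid the overflow risk of (d+r-1)/r.
func div(d, r time.Duration) int64 {
	if d%r > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

// encodeDuration is an abridged copy of EncodeDuration: choose the smallest
// unit whose value still fits in 8 ASCII digits.
func encodeDuration(t time.Duration) string {
	if t <= 0 {
		return "0n"
	}
	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "n"
	}
	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "u"
	}
	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "m"
	}
	return strconv.FormatInt(div(t, time.Second), 10) + "S"
}

func main() {
	fmt.Println(encodeDuration(50 * time.Millisecond)) // "50000000n": fits as nanoseconds
	fmt.Println(encodeDuration(3 * time.Second))       // "3000000u": too big for ns, fits as microseconds
}
```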
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
new file mode 100644
index 000000000..e2f948e8f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -0,0 +1,20 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 000000000..6f22bd891
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+	return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists.  The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+	md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 000000000..ec62b4775
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"errors"
+	"strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+func ParseMethod(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
+
+// baseContentType is the base content-type for gRPC.  This is a valid
+// content-type on its own, but can also include a content-subtype such as
+// "proto" as a suffix after "+" or ";".  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+// for more details.
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type.  The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// guaranteed since != baseContentType and has baseContentType prefix
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// this will return true for "application/grpc+" or "application/grpc;"
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
+	}
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase
+func ContentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
+	}
+	return baseContentType + "+" + contentSubtype
+}
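ContentSubtype accepts exactly three shapes: the bare base type, and the base type followed by '+' or ';' plus a (possibly empty) subtype. A runnable sketch mirroring that switch, including a near-miss like "application/grpc-web", which is correctly rejected:

```go
package main

import (
	"fmt"
	"strings"
)

const baseContentType = "application/grpc"

// contentSubtype mirrors grpcutil.ContentSubtype: accept the bare base
// type, or the base type plus "+subtype" / ";subtype"; reject the rest.
func contentSubtype(contentType string) (string, bool) {
	if contentType == baseContentType {
		return "", true
	}
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	// Index is in range: contentType has the prefix but is longer than it.
	switch contentType[len(baseContentType)] {
	case '+', ';':
		return contentType[len(baseContentType)+1:], true
	default:
		return "", false
	}
}

func main() {
	for _, ct := range []string{
		"application/grpc",
		"application/grpc+proto",
		"application/grpc;json",
		"application/grpc-web", // not a valid gRPC content-type
	} {
		sub, ok := contentSubtype(ct)
		fmt.Printf("%-24s ok=%v subtype=%q\n", ct, ok, sub)
	}
}
```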
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
new file mode 100644
index 000000000..7a092b2b8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import "regexp"
+
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
+	re.Longest()
+	rem := re.FindString(text)
+	return len(rem) == len(text)
+}
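Go's RE2 engine matches substrings and prefers leftmost matches, so a plain MatchString cannot enforce a full match; FullMatchWithRegex switches the regexp to leftmost-longest mode and then compares the match length against the input length. A sketch of the same check (note that Longest mutates the shared *Regexp, as in the original):

```go
package main

import (
	"fmt"
	"regexp"
)

// fullMatch mirrors grpcutil.FullMatchWithRegex: enable leftmost-longest
// matching, then require the longest match to cover the whole input.
func fullMatch(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest()
	return len(re.FindString(text)) == len(text)
}

func main() {
	re := regexp.MustCompile("ab+")
	fmt.Println(re.MatchString("xabbby")) // true: partial match anywhere
	fmt.Println(fullMatch(re, "xabbby"))  // false: match does not cover the input
	fmt.Println(fullMatch(re, "abbb"))    // true
}
```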
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 000000000..0a76d9de6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package.  It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfig.
+	// This function compares the config without rawJSON stripped, in case
+	// there's a difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
+	// DrainServerTransports initiates a graceful close of existing connections
+	// on a gRPC server accepted on the provided listener address. An
+	// xDS-enabled server invokes this method on a grpc.Server when a particular
+	// listener moves to "not-serving" mode.
+	DrainServerTransports interface{} // func(*grpc.Server, string)
+	// AddGlobalServerOptions adds an array of ServerOption that will be
+	// effective globally for newly created servers. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	AddGlobalServerOptions interface{} // func(opt ...ServerOption)
+	// ClearGlobalServerOptions clears the array of extra ServerOption. This
+	// method is useful in testing and benchmarking.
+	ClearGlobalServerOptions func()
+	// AddGlobalDialOptions adds an array of DialOption that will be effective
+	// globally for newly created client channels. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	AddGlobalDialOptions interface{} // func(opt ...DialOption)
+	// ClearGlobalDialOptions clears the array of extra DialOption. This
+	// method is useful in testing and benchmarking.
+	ClearGlobalDialOptions func()
+	// JoinDialOptions combines the dial options passed as arguments into a
+	// single dial option.
+	JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
+	// JoinServerOptions combines the server options passed as arguments into a
+	// single server option.
+	JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
+
+	// WithBinaryLogger returns a DialOption that specifies the binary logger
+	// for a ClientConn.
+	WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
+	// BinaryLogger returns a ServerOption that can set the binary logger for a
+	// server.
+	BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
+
+	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
+	// the provided xds bootstrap config instead of the global configuration from
+	// the supported environment variables.  The resolver.Builder is meant to be
+	// used in conjunction with the grpc.WithResolvers DialOption.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
+
+	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
+	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
+	// variable.
+	//
+	// TODO: Remove this function once the RLS env var is removed.
+	RegisterRLSClusterSpecifierPluginForTesting func()
+
+	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
+	// Specifier Plugin for testing purposes. This is needed because there is no way
+	// to unregister the RLS Cluster Specifier Plugin after registering it solely
+	// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
+	//
+	// TODO: Remove this function once the RLS env var is removed.
+	UnregisterRLSClusterSpecifierPluginForTesting func()
+
+	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
+	// purposes, regardless of the RBAC environment variable.
+	//
+	// TODO: Remove this function once the RBAC env var is removed.
+	RegisterRBACHTTPFilterForTesting func()
+
+	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
+	// testing purposes. This is needed because there is no way to unregister the
+	// HTTP Filter after registering it solely for testing purposes using
+	// RegisterRBACHTTPFilterForTesting().
+	//
+	// TODO: Remove this function once the RBAC env var is removed.
+	UnregisterRBACHTTPFilterForTesting func()
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 000000000..b2980f8ac
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,120 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o interface{}) bool {
+	om, ok := o.(mdValue)
+	if !ok {
+		return false
+	}
+	if len(m) != len(om) {
+		return false
+	}
+	for k, v := range m {
+		ov := om[k]
+		if len(ov) != len(v) {
+			return false
+		}
+		for i, ve := range v {
+			if ov[i] != ve {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+	attrs := addr.Attributes
+	if attrs == nil {
+		return nil
+	}
+	md, _ := attrs.Value(mdKey).(mdValue)
+	return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+	return addr
+}
+
+// Validate returns an error if the input md contains invalid keys or values.
+//
+// If the header is not a pseudo-header, the following items are checked:
+// - header names must consist of one or more characters from the set [0-9 a-z _ - .].
+// - if the header name ends with a "-bin" suffix, no validation of the header value is performed.
+// - otherwise, the header value must only contain characters from the set [%x20-%x7E].
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		// pseudo-headers are ignored
+		if k[0] == ':' {
+			continue
+		}
+		// Check the key; indexing the string avoids the rune conversion of a for-range loop.
+		for i := 0; i < len(k); i++ {
+			r := k[i]
+			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+			}
+		}
+		if strings.HasSuffix(k, "-bin") {
+			continue
+		}
+		// check value
+		for _, val := range vals {
+			if hasNotPrintable(val) {
+				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+			}
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+	// Indexing the string avoids the rune conversion of a for-range loop.
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 000000000..0177af4b5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/golang/protobuf/jsonpb"
+	protov1 "github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+	switch ee := e.(type) {
+	case protov1.Message:
+		mm := jsonpb.Marshaler{Indent: jsonIndent}
+		ret, err := mm.MarshalToString(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return ret
+	case protov2.Message:
+		mm := protojson.MarshalOptions{
+			Multiline: true,
+			Indent:    jsonIndent,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	default:
+		ret, err := json.MarshalIndent(ee, "", jsonIndent)
+		if err != nil {
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
+func FormatJSON(b []byte) string {
+	var out bytes.Buffer
+	err := json.Indent(&out, b, "", jsonIndent)
+	if err != nil {
+		return string(b)
+	}
+	return out.String()
+}
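+
+// Illustrative sketch (not part of the upstream file): ToJSON accepts proto
+// messages as well as plain Go values, so callers can log either uniformly.
+// The struct below is assumed for illustration.
+//
+//	type update struct{ Addr string }
+//	fmt.Println(ToJSON(update{Addr: "10.0.0.1"}))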
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 000000000..c7a18a948
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+	// Selects the configuration for the RPC, or terminates it using the error.
+	// This error will be converted by the gRPC library to a status error with
+	// code UNKNOWN if it is not returned as a status error.
+	SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+	// Context is the user's context for the RPC and contains headers and
+	// application timeout.  It is passed for interception purposes and for
+	// efficiency reasons.  SelectConfig should not be blocking.
+	Context context.Context
+	Method  string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+	// The context to use for the remainder of the RPC; can pass info to LB
+	// policy or affect timeout or metadata.
+	Context      context.Context
+	MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+	OnCommitted  func()                     // Called when the RPC has been committed (retries no longer possible)
+	Interceptor  ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but defined here for circular
+// dependency reasons.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered. It is also not safe to call CloseSend
+	// concurrently with SendMsg.
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+	// NewStream produces a ClientStream for an RPC which may optionally use
+	// the provided function to produce a stream for delegation.  Note:
+	// RPCInfo.Context should not be used (will be nil).
+	//
+	// done is invoked when the RPC is finished using its connection, or could
+	// not be assigned a connection.  RPC operations may still occur on
+	// ClientStream after done is called, since the interceptor is invoked by
+	// application-layer operations.  done must never be nil when called.
+	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+	// AllowRPC checks if an incoming RPC is allowed to proceed based on
+	// information about the connection the RPC was received on, and its HTTP
+	// headers. This information will be piped into the context.
+	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+	state.Attributes = state.Attributes.WithValue(csKey, cs)
+	return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+	cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+	return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+	mu sync.RWMutex
+	cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+	scs.mu.Lock()
+	defer scs.mu.Unlock()
+	scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+	scs.mu.RLock()
+	defer scs.mu.RUnlock()
+	return scs.cs.SelectConfig(r)
+}
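+
+// Illustrative sketch (not part of the upstream file): a SafeConfigSelector
+// lets a resolver swap selectors while RPCs keep selecting configs
+// concurrently. The selector values below are assumed for illustration.
+//
+//	var scs SafeConfigSelector
+//	scs.UpdateConfigSelector(oldSelector)
+//	go scs.SelectConfig(RPCInfo{Method: "/Service/Method"})
+//	scs.UpdateConfigSelector(newSelector) // returns once old uses complete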
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
new file mode 100644
index 000000000..09a667f33
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -0,0 +1,458 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+// addresses from SRV records.  Must not be changed after init time.
+var EnableSRVLookups = false
+
+var logger = grpclog.Component("dns")
+
+// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
+// single variable for testing the resolver?
+var (
+	newTimer           = time.NewTimer
+	newTimerDNSResRate = time.NewTimer
+)
+
+func init() {
+	resolver.Register(NewBuilder())
+}
+
+const (
+	defaultPort       = "443"
+	defaultDNSSvrPort = "53"
+	golang            = "GO"
+	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	txtPrefix = "_grpc_config."
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
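+
+// Illustrative sketch (not part of the upstream file): for a target
+// "foo.example.com", the resolver queries the TXT record
+// "_grpc_config.foo.example.com" and expects contents of the form below
+// (hostname and JSON assumed for illustration):
+//
+//	grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]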
+
+var (
+	errMissingAddr = errors.New("dns resolver: missing address")
+
+	// Addresses ending with a colon that is supposed to be the separator
+	// between host and port are not allowed.  E.g. "::" is a valid address as
+	// it is an IPv6 address (host only), while "[::]:" is invalid because it
+	// ends with a colon acting as the host and port separator.
+	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+var (
+	defaultResolver netResolver = net.DefaultResolver
+	// To prevent excessive re-resolution, we enforce a rate limit on DNS
+	// resolution requests.
+	minDNSResRate = 30 * time.Second
+)
+
+var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, authority)
+	}
+}
+
+var customAuthorityResolver = func(authority string) (netResolver, error) {
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     customAuthorityDialler(authorityWithPort),
+	}, nil
+}
+
+// NewBuilder creates a dnsBuilder, which is used to build DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{}
+}
+
+type dnsBuilder struct{}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint(), defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	if target.URL.Host == "" {
+		d.resolver = defaultResolver
+	} else {
+		d.resolver, err = customAuthorityResolver(target.URL.Host)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+type netResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
+
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (deadResolver) Close() {}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	host     string
+	port     string
+	resolver netResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	// wg is used to ensure Close() returns only after the watcher() goroutine has finished.
+	// Otherwise, a data race is possible. [Race Example] in dns_resolver_test we
+	// replace the real lookup functions with mocked ones to facilitate testing.
+	// If Close() doesn't wait for the watcher() goroutine to finish, the race detector
+	// sometimes warns that the lookup (READ of the lookup function pointers) inside the
+	// watcher() goroutine races with replaceNetFunc (WRITE of the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	backoffIndex := 1
+	for {
+		state, err := d.lookup()
+		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
+			d.cc.ReportError(err)
+		} else {
+			err = d.cc.UpdateState(*state)
+		}
+
+		var timer *time.Timer
+		if err == nil {
+			// Resolution succeeded; wait for the next ResolveNow. However, also wait at least
+			// 30 seconds to prevent constant re-resolution.
+			backoffIndex = 1
+			timer = newTimerDNSResRate(minDNSResRate)
+			select {
+			case <-d.ctx.Done():
+				timer.Stop()
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll on an error found by the DNS resolver or an error received from the ClientConn.
+			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+			backoffIndex++
+		}
+		select {
+		case <-d.ctx.Done():
+			timer.Stop()
+			return
+		case <-timer.C:
+		}
+	}
+}
+
+func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
+	if !EnableSRVLookups {
+		return nil, nil
+	}
+	var newAddrs []resolver.Address
+	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
+	}
+	for _, s := range srvs {
+		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
+		if err != nil {
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
+				continue
+			}
+			return nil, err
+		}
+		for _, a := range lbAddrs {
+			ip, ok := formatIP(a)
+			if !ok {
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+			}
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
+		}
+	}
+	return newAddrs, nil
+}
+
+func handleDNSError(err error, lookupType string) error {
+	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff).  Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
+	if err != nil {
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		logger.Info(err)
+	}
+	return err
+}
+
+func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
+	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
+	if err != nil {
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// A TXT record must have the "grpc_config=" attribute in order to be used as a service config.
+	if !strings.HasPrefix(res, txtAttribute) {
+		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		// This is not an error; it is the equivalent of not having a service config.
+		return nil
+	}
+	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+	return d.cc.ParseServiceConfig(sc)
+}
+
+func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
+	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
+	if err != nil {
+		err = handleDNSError(err, "A")
+		return nil, err
+	}
+	newAddrs := make([]resolver.Address, 0, len(addrs))
+	for _, a := range addrs {
+		ip, ok := formatIP(a)
+		if !ok {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+		}
+		addr := ip + ":" + d.port
+		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+	}
+	return newAddrs, nil
+}
+
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+	srv, srvErr := d.lookupSRV()
+	addrs, hostErr := d.lookupHost()
+	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+		return nil, hostErr
+	}
+
+	state := resolver.State{Addresses: addrs}
+	if len(srv) > 0 {
+		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
+	}
+	if !d.disableServiceConfig {
+		state.ServiceConfig = d.lookupTXT()
+	}
+	return &state, nil
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, it returns addr and ok = true.
+// If addr is an IPv6 address, it returns addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string and the default port, and returns the formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
+// examples:
+// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
+// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
+func parseTarget(target, defaultPort string) (host, port string, err error) {
+	if target == "" {
+		return "", "", errMissingAddr
+	}
+	if ip := net.ParseIP(target); ip != nil {
+		// target is an IPv4 or IPv6 (without brackets) address
+		return target, defaultPort, nil
+	}
+	if host, port, err = net.SplitHostPort(target); err == nil {
+		if port == "" {
+			// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
+			return "", "", errEndsWithColon
+		}
+		// target has a port, e.g. ipv4-host:port, [ipv6-host]:port, host-name:port
+		if host == "" {
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			host = "localhost"
+		}
+		return host, port, nil
+	}
+	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
+		// target doesn't have a port
+		return host, port, nil
+	}
+	return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
+}
+
+type rawChoice struct {
+	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
+	Percentage     *int             `json:"percentage,omitempty"`
+	ClientHostName *[]string        `json:"clientHostName,omitempty"`
+	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
+}
+
+func containsString(a *[]string, b string) bool {
+	if a == nil {
+		return true
+	}
+	for _, c := range *a {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+func chosenByPercentage(a *int) bool {
+	if a == nil {
+		return true
+	}
+	return grpcrand.Intn(100)+1 <= *a
+}
+
+func canaryingSC(js string) string {
+	if js == "" {
+		return ""
+	}
+	var rcs []rawChoice
+	err := json.Unmarshal([]byte(js), &rcs)
+	if err != nil {
+		logger.Warningf("dns: error parsing service config json: %v", err)
+		return ""
+	}
+	cliHostname, err := os.Hostname()
+	if err != nil {
+		logger.Warningf("dns: error getting client hostname: %v", err)
+		return ""
+	}
+	var sc string
+	for _, c := range rcs {
+		if !containsString(c.ClientLanguage, golang) ||
+			!chosenByPercentage(c.Percentage) ||
+			!containsString(c.ClientHostName, cliHostname) ||
+			c.ServiceConfig == nil {
+			continue
+		}
+		sc = string(*c.ServiceConfig)
+		break
+	}
+	return sc
+}
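+
+// Illustrative sketch (not part of the upstream file): a canarying choice
+// list that applies one config to 10% of Go clients and another to everyone
+// else; the first matching entry wins (JSON assumed for illustration):
+//
+//	[
+//	  {"clientLanguage": ["GO"], "percentage": 10, "serviceConfig": {...}},
+//	  {"serviceConfig": {...}}
+//	]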
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
new file mode 100644
index 000000000..afac56572
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package passthrough implements a pass-through resolver. It sends the target
+// name without scheme back to gRPC as resolved address.
+package passthrough
+
+import (
+	"errors"
+
+	"google.golang.org/grpc/resolver"
+)
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.Endpoint() == "" && opts.Dialer == nil {
+		return nil, errors.New("passthrough: received empty target in Build()")
+	}
+	r := &passthroughResolver{
+		target: target,
+		cc:     cc,
+	}
+	r.start()
+	return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+	return scheme
+}
+
+type passthroughResolver struct {
+	target resolver.Target
+	cc     resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
+}
+
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+	resolver.Register(&passthroughBuilder{})
+}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 000000000..160911687
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+	scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.URL.Host != "" {
+		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
+	}
+
+	// gRPC was parsing the dial target manually before PR #4817, and we
+	// switched to using url.Parse() in that PR. To avoid breaking existing
+	// resolver implementations we ended up stripping the leading "/" from the
+	// endpoint. This obviously does not work for the "unix" scheme. Hence we
+	// end up using the parsed URL instead.
+	endpoint := target.URL.Path
+	if endpoint == "" {
+		endpoint = target.URL.Opaque
+	}
+	addr := resolver.Address{Addr: endpoint}
+	if b.scheme == unixAbstractScheme {
+		// We cannot prepend \0 as C++ gRPC does, since in Go '@' is used to signify that we
+		// do not want a trailing \0 in the address.
+		addr.Addr = "@" + addr.Addr
+	}
+	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+	return &nopResolver{}, nil
+}
+
+func (b *builder) Scheme() string {
+	return b.scheme
+}
+
+type nopResolver struct{}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+	resolver.Register(&builder{scheme: unixScheme})
+	resolver.Register(&builder{scheme: unixAbstractScheme})
+}
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 000000000..51e733e49
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+	Name   string
+	Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+	if bc.Config == nil {
+		// If config is nil, return empty config `{}`.
+		return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+	}
+	c, err := json.Marshal(bc.Config)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+//   - If the config for the first supported policy is invalid, the whole service
+//     config is invalid.
+//   - If the list doesn't contain any supported policy, the whole service config
+//     is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+	var ir intermediateBalancerConfig
+	err := json.Unmarshal(b, &ir)
+	if err != nil {
+		return err
+	}
+
+	var names []string
+	for i, lbcfg := range ir {
+		if len(lbcfg) != 1 {
+			return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+		}
+
+		var (
+			name    string
+			jsonCfg json.RawMessage
+		)
+		// Get the key:value pair from the map. We have already made sure that
+		// the map contains a single entry.
+		for name, jsonCfg = range lbcfg {
+		}
+
+		names = append(names, name)
+		builder := balancer.Get(name)
+		if builder == nil {
+			// If the balancer is not registered, move on to the next config.
+			// This is not an error.
+			continue
+		}
+		bc.Name = name
+
+		parser, ok := builder.(balancer.ConfigParser)
+		if !ok {
+			if string(jsonCfg) != "{}" {
+				logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+			}
+			// Stop here, even though the builder doesn't support parsing the config.
+			return nil
+		}
+
+		cfg, err := parser.ParseConfig(jsonCfg)
+		if err != nil {
+			return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+		}
+		bc.Config = cfg
+		return nil
+	}
+	// This is reached when the for loop iterates over all entries without
+	// returning. This means we had a loadBalancingConfig slice but did not
+	// encounter a registered policy. The config is considered invalid in this
+	// case.
+	return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
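+
+// Illustrative sketch (not part of the upstream file): given the input below,
+// UnmarshalJSON skips "some_policy" when no balancer with that name is
+// registered and selects "round_robin" ("some_policy" is assumed for
+// illustration):
+//
+//	[{"some_policy": {}}, {"round_robin": {}}]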
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API.  If either one is not set,
+	// then the other will be used.  If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used.  If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+	// RetryPolicy configures retry options for the method.
+	RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+	// MaxAttempts is the maximum number of attempts, including the original RPC.
+	//
+	// This field is required and must be two or greater.
+	MaxAttempts int
+
+	// Exponential backoff parameters. The initial retry attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
+	// random(0,
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+	//
+	// These fields are required and must be greater than zero.
+	InitialBackoff    time.Duration
+	MaxBackoff        time.Duration
+	BackoffMultiplier float64
+
+	// The set of status codes which may be retried.
+	//
+	// Status codes are specified as strings, e.g., "UNAVAILABLE".
+	//
+	// This field is required and must be non-empty.
+	// Note: a set is used to store this for easy lookup.
+	RetryableStatusCodes map[codes.Code]bool
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
new file mode 100644
index 000000000..b0ead4f54
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -0,0 +1,176 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC.  These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto.  gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+)
+
+// Status represents an RPC status code, message, and details.  It is immutable
+// and should be created with New, Newf, or FromProto.
+type Status struct {
+	s *spb.Status
+}
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+	return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...interface{}) *Status {
+	return New(c, fmt.Sprintf(format, a...))
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+	return &Status{s: proto.Clone(s).(*spb.Status)}
+}
+
+// Err returns an error representing c and msg.  If c is OK, returns nil.
+func Err(c codes.Code, msg string) error {
+	return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	return Err(c, fmt.Sprintf(format, a...))
+}
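+
+// Illustrative sketch (not part of the upstream file): handlers build errors
+// via this package, and an OK code always yields a nil error.
+//
+//	err := Err(codes.NotFound, "user not found") // non-nil *Error
+//	_ = Err(codes.OK, "ignored")                 // nil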
+
+// Code returns the status code contained in s.
+func (s *Status) Code() codes.Code {
+	if s == nil || s.s == nil {
+		return codes.OK
+	}
+	return codes.Code(s.s.Code)
+}
+
+// Message returns the message contained in s.
+func (s *Status) Message() string {
+	if s == nil || s.s == nil {
+		return ""
+	}
+	return s.s.Message
+}
+
+// Proto returns s's status as an spb.Status proto message.
+func (s *Status) Proto() *spb.Status {
+	if s == nil {
+		return nil
+	}
+	return proto.Clone(s.s).(*spb.Status)
+}
+
+// Err returns an immutable error representing s; returns nil if s.Code() is OK.
+func (s *Status) Err() error {
+	if s.Code() == codes.OK {
+		return nil
+	}
+	return &Error{s: s}
+}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
+	if s.Code() == codes.OK {
+		return nil, errors.New("no error details for status with code OK")
+	}
+	// s.Code() != OK implies that s.Proto() != nil.
+	p := s.Proto()
+	for _, detail := range details {
+		any, err := ptypes.MarshalAny(detail)
+		if err != nil {
+			return nil, err
+		}
+		p.Details = append(p.Details, any)
+	}
+	return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+func (s *Status) Details() []interface{} {
+	if s == nil || s.s == nil {
+		return nil
+	}
+	details := make([]interface{}, 0, len(s.s.Details))
+	for _, any := range s.s.Details {
+		detail := &ptypes.DynamicAny{}
+		if err := ptypes.UnmarshalAny(any, detail); err != nil {
+			details = append(details, err)
+			continue
+		}
+		details = append(details, detail.Message)
+	}
+	return details
+}
+
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
+// Error wraps a pointer to a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+	s *Status
+}
+
+func (e *Error) Error() string {
+	return e.s.String()
+}
+
+// GRPCStatus returns the Status represented by e.
+func (e *Error) GRPCStatus() *Status {
+	return e.s
+}
+
+// Is implements future error.Is functionality.
+// An Error is equivalent if the code and message are identical.
+func (e *Error) Is(target error) bool {
+	tse, ok := target.(*Error)
+	if !ok {
+		return false
+	}
+	return proto.Equal(e.s.s, tse.s.s)
+}
+
+// IsRestrictedControlPlaneCode returns whether the status includes a code
+// restricted for control plane usage as defined by gRFC A54.
+func IsRestrictedControlPlaneCode(s *Status) bool {
+	switch s.Code() {
+	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
+		return true
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 000000000..b3a72276d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		logger.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux environments.
+type Rusage = syscall.Rusage
+
+// GetRusage returns the resource usage of the current process.
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
+}
+
+// CPUTimeDiff returns the differences in user CPU time and system CPU time used
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	var (
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// Not a TCP connection; exit early.
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
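+
+// Illustrative sketch (not part of the upstream file): callers typically set
+// the timeout right after dialing (address assumed for illustration).
+//
+//	conn, _ := net.Dial("tcp", "10.0.0.1:443")
+//	_ = SetTCPUserTimeout(conn, 20*time.Second)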
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 000000000..999f52cd7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,77 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+var logger = grpclog.Component("core")
+
+func log() {
+	once.Do(func() {
+		logger.Info("CPU time info is unavailable on non-linux environments.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux environments.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences in user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function for non-linux environments.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 000000000..070680edb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to.  TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// beta * our estimated bdp, and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp smaller than or equal to twice the real BDP,
+	// we should multiply the current sample by 4/3; however, to round things out,
+	// we use 2 as the multiplication factor.
+	gamma = 2
+)
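+
+// Illustrative sketch (not part of the upstream file): with beta = 0.66 and
+// gamma = 2, a cycle that accumulates a 1 MB sample against a current 1.2 MB
+// estimate (1 MB >= 0.66 * 1.2 MB) while also setting a new bandwidth maximum
+// raises the estimate to min(2 * 1 MB, bdpLimit) = 2 MB.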
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// bool to keep track of the beginning of a new measurement cycle.
+	isSent bool
+	// Callback to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
+	sampleCount uint64
+	// round trip time (seconds)
+	rtt float64
+}
+
+// timesnap registers the time the bdp ping was sent out so that
+// network rtt can be calculated when its ack is received.
+// It is called (by controller) when the bdpPing is
+// being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+	if bdpPing.data != d {
+		return
+	}
+	b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+// by the caller (handleData) to make a decision about batching
+// a window update with it.
+func (b *bdpEstimator) add(n uint32) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.bdp == bdpLimit {
+		return false
+	}
+	if !b.isSent {
+		b.isSent = true
+		b.sample = n
+		b.sentAt = time.Time{}
+		b.sampleCount++
+		return true
+	}
+	b.sample += n
+	return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+	// Check if the ping acked for was the bdp ping.
+	if bdpPing.data != d {
+		return
+	}
+	b.mu.Lock()
+	rttSample := time.Since(b.sentAt).Seconds()
+	if b.sampleCount < 10 {
+		// Bootstrap rtt with an average of the first 10 rtt samples.
+		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+	} else {
+		// Weight the recent past more heavily.
+		b.rtt += (rttSample - b.rtt) * float64(alpha)
+	}
+	b.isSent = false
+	// The number of bytes accumulated so far in the sample is smaller
+	// than or equal to 1.5 times the real BDP on a saturated connection.
+	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
+	if bwCurrent > b.bwMax {
+		b.bwMax = bwCurrent
+	}
+	// If the current sample (which is smaller than or equal to 1.5 times the real BDP) is
+	// greater than or equal to 2/3rds of our perceived bdp, AND this is the maximum
+	// bandwidth seen so far, we should update our perception of the network BDP.
+	if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
+		sampleFloat := float64(b.sample)
+		b.bdp = uint32(gamma * sampleFloat)
+		if b.bdp > bdpLimit {
+			b.bdp = bdpLimit
+		}
+		bdp := b.bdp
+		b.mu.Unlock()
+		b.updateFlowControl(bdp)
+		return
+	}
+	b.mu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 000000000..9097385e1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,998 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/status"
+)
+
+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+	e.SetMaxDynamicTableSizeLimit(v)
+}
+
+type itemNode struct {
+	it   interface{}
+	next *itemNode
+}
+
+type itemList struct {
+	head *itemNode
+	tail *itemNode
+}
+
+func (il *itemList) enqueue(i interface{}) {
+	n := &itemNode{it: i}
+	if il.tail == nil {
+		il.head, il.tail = n, n
+		return
+	}
+	il.tail.next = n
+	il.tail = n
+}
+
+// peek returns the first item in the list without removing it from the
+// list.
+func (il *itemList) peek() interface{} {
+	return il.head.it
+}
+
+func (il *itemList) dequeue() interface{} {
+	if il.head == nil {
+		return nil
+	}
+	i := il.head.it
+	il.head = il.head.next
+	if il.head == nil {
+		il.tail = nil
+	}
+	return i
+}
+
+func (il *itemList) dequeueAll() *itemNode {
+	h := il.head
+	il.head, il.tail = nil, nil
+	return h
+}
+
+func (il *itemList) isEmpty() bool {
+	return il.head == nil
+}
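+
+// Illustrative sketch (not part of the upstream file): the list preserves
+// FIFO order, and dequeue returns nil once the list is empty.
+//
+//	var il itemList
+//	il.enqueue(1)
+//	il.enqueue(2)
+//	_ = il.dequeue() // 1
+//	_ = il.dequeue() // 2
+//	_ = il.dequeue() // nil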
+
+// The following defines various control items which could flow through
+// the control buffer of the transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+
+// maxQueuedTransportResponseFrames is the most queued "transport response"
+// frames we will buffer before preventing new reads from occurring on the
+// transport.  These are control frames sent in response to client requests,
+// such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
+// registerStream is used to register an incoming stream with loopy writer.
+type registerStream struct {
+	streamID uint32
+	wq       *writeQuota
+}
+
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
+// headerFrame is also used to register a stream on the client side.
+type headerFrame struct {
+	streamID   uint32
+	hf         []hpack.HeaderField
+	endStream  bool               // Valid on server side.
+	initStream func(uint32) error // Used only on the client side.
+	onWrite    func()
+	wq         *writeQuota    // write quota for the stream created.
+	cleanup    *cleanupStream // Valid on the server side.
+	onOrphaned func(error)    // Valid on client-side
+}
+
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
+type cleanupStream struct {
+	streamID uint32
+	rst      bool
+	rstCode  http2.ErrCode
+	onWrite  func()
+}
+
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
+type earlyAbortStream struct {
+	httpStatus     uint32
+	streamID       uint32
+	contentSubtype string
+	status         *status.Status
+	rst            bool
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
+type dataFrame struct {
+	streamID  uint32
+	endStream bool
+	h         []byte
+	d         []byte
+	// onEachWrite is called every time
+	// a part of d is written out.
+	onEachWrite func()
+}
+
+func (*dataFrame) isTransportResponseFrame() bool { return false }
+
+type incomingWindowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
+
+type outgoingWindowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
+	return false // window updates are throttled by thresholds
+}
+
+type incomingSettings struct {
+	ss []http2.Setting
+}
+
+func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
+
+type outgoingSettings struct {
+	ss []http2.Setting
+}
+
+func (*outgoingSettings) isTransportResponseFrame() bool { return false }
+
+type incomingGoAway struct {
+}
+
+func (*incomingGoAway) isTransportResponseFrame() bool { return false }
+
+type goAway struct {
+	code      http2.ErrCode
+	debugData []byte
+	headsUp   bool
+	closeConn error // if set, loopyWriter will exit, resulting in conn closure
+}
+
+func (*goAway) isTransportResponseFrame() bool { return false }
+
+type ping struct {
+	ack  bool
+	data [8]byte
+}
+
+func (*ping) isTransportResponseFrame() bool { return true }
+
+type outFlowControlSizeRequest struct {
+	resp chan uint32
+}
+
+func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+
+// closeConnection is an instruction to tell the loopy writer to flush the
+// framer and exit, which will cause the transport's connection to be closed
+// (by the client or server).  The transport itself will close after the reader
+// encounters the EOF caused by the connection closure.
+type closeConnection struct{}
+
+func (closeConnection) isTransportResponseFrame() bool { return false }
+
+type outStreamState int
+
+const (
+	active outStreamState = iota
+	empty
+	waitingOnStreamQuota
+)
+
+type outStream struct {
+	id               uint32
+	state            outStreamState
+	itl              *itemList
+	bytesOutStanding int
+	wq               *writeQuota
+
+	next *outStream
+	prev *outStream
+}
+
+func (s *outStream) deleteSelf() {
+	if s.prev != nil {
+		s.prev.next = s.next
+	}
+	if s.next != nil {
+		s.next.prev = s.prev
+	}
+	s.next, s.prev = nil, nil
+}
+
+type outStreamList struct {
+	// Following are sentinel objects that mark the
+	// beginning and end of the list. They do not
+	// contain any item lists. All valid objects are
+	// inserted in between them.
+	// This is needed so that an outStream object can
+	// deleteSelf() in O(1) time without knowing which
+	// list it belongs to.
+	head *outStream
+	tail *outStream
+}
+
+func newOutStreamList() *outStreamList {
+	head, tail := new(outStream), new(outStream)
+	head.next = tail
+	tail.prev = head
+	return &outStreamList{
+		head: head,
+		tail: tail,
+	}
+}
+
+func (l *outStreamList) enqueue(s *outStream) {
+	e := l.tail.prev
+	e.next = s
+	s.prev = e
+	s.next = l.tail
+	l.tail.prev = s
+}
+
+// remove from the beginning of the list.
+func (l *outStreamList) dequeue() *outStream {
+	b := l.head.next
+	if b == l.tail {
+		return nil
+	}
+	b.deleteSelf()
+	return b
+}
+
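+// exampleOutStreamListOps is an illustrative sketch (not part of the
+// upstream file): because the head and tail sentinels always bracket the
+// real elements, deleteSelf only rewires its two neighbours and needs no
+// reference to the list itself, which is what makes removal O(1).
+func exampleOutStreamListOps() {
+	l := newOutStreamList()
+	s := &outStream{id: 1}
+	l.enqueue(s)
+	s.deleteSelf()                  // O(1): relinks the neighbours around s.
+	fmt.Println(l.dequeue() == nil) // true: the list is empty again.
+}
+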
+// controlBuffer is a way to pass information to loopy.
+// Information is passed as specific struct types called control frames.
+// A control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state.
+// It shouldn't be confused with an HTTP2 frame, although some of the control frames
+// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+type controlBuffer struct {
+	ch              chan struct{}
+	done            <-chan struct{}
+	mu              sync.Mutex
+	consumerWaiting bool
+	list            *itemList
+	err             error
+
+	// transportResponseFrames counts the number of queued items that represent
+	// the response of an action initiated by the peer.  trfChan is created
+	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+	// closed and nilled when transportResponseFrames drops below the
+	// threshold.  Both fields are protected by mu.
+	transportResponseFrames int
+	trfChan                 atomic.Value // chan struct{}
+}
+
+func newControlBuffer(done <-chan struct{}) *controlBuffer {
+	return &controlBuffer{
+		ch:   make(chan struct{}, 1),
+		list: &itemList{},
+		done: done,
+	}
+}
+
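+// exampleControlBufferFlow is an illustrative sketch (not part of the
+// upstream file) of the producer/consumer contract: any goroutine may put
+// control frames, while loopy is the single consumer draining them with
+// get, blocking when asked to.
+func exampleControlBufferFlow(done chan struct{}) error {
+	cb := newControlBuffer(done)
+	if err := cb.put(&ping{data: [8]byte{1}}); err != nil {
+		return err
+	}
+	it, err := cb.get(true) // Blocks until an item arrives or done closes.
+	if err != nil {
+		return err
+	}
+	_ = it.(*ping) // The frame comes back out in FIFO order.
+	return nil
+}
+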
+// throttle blocks if there are too many incomingSettings/cleanupStreams in the
+// controlbuf.
+func (c *controlBuffer) throttle() {
+	ch, _ := c.trfChan.Load().(chan struct{})
+	if ch != nil {
+		select {
+		case <-ch:
+		case <-c.done:
+		}
+	}
+}
+
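+// exampleReaderThrottle is an illustrative sketch (not part of the
+// upstream file) of how a reader loop is expected to use throttle: the
+// call is a no-op until maxQueuedTransportResponseFrames response frames
+// (settings acks, pings, RST_STREAMs) are queued, and then blocks until
+// get drains the queue back below the threshold or the transport is done.
+func exampleReaderThrottle(c *controlBuffer, readFrame func() cbItem) error {
+	for {
+		c.throttle() // Apply back-pressure before reading the next frame.
+		if err := c.put(readFrame()); err != nil {
+			return err
+		}
+	}
+}
+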
+func (c *controlBuffer) put(it cbItem) error {
+	_, err := c.executeAndPut(nil, it)
+	return err
+}
+
+func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
+	var wakeUp bool
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if f != nil {
+		if !f(it) { // f wasn't successful
+			c.mu.Unlock()
+			return false, nil
+		}
+	}
+	if c.consumerWaiting {
+		wakeUp = true
+		c.consumerWaiting = false
+	}
+	c.list.enqueue(it)
+	if it.isTransportResponseFrame() {
+		c.transportResponseFrames++
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are adding the frame that puts us over the threshold; create
+			// a throttling channel.
+			c.trfChan.Store(make(chan struct{}))
+		}
+	}
+	c.mu.Unlock()
+	if wakeUp {
+		select {
+		case c.ch <- struct{}{}:
+		default:
+		}
+	}
+	return true, nil
+}
+
+// Note: the argument f should never be nil.
+func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if !f(it) { // f wasn't successful
+		c.mu.Unlock()
+		return false, nil
+	}
+	c.mu.Unlock()
+	return true, nil
+}
+
+func (c *controlBuffer) get(block bool) (interface{}, error) {
+	for {
+		c.mu.Lock()
+		if c.err != nil {
+			c.mu.Unlock()
+			return nil, c.err
+		}
+		if !c.list.isEmpty() {
+			h := c.list.dequeue().(cbItem)
+			if h.isTransportResponseFrame() {
+				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+					// We are removing the frame that put us over the
+					// threshold; close and clear the throttling channel.
+					ch := c.trfChan.Load().(chan struct{})
+					close(ch)
+					c.trfChan.Store((chan struct{})(nil))
+				}
+				c.transportResponseFrames--
+			}
+			c.mu.Unlock()
+			return h, nil
+		}
+		if !block {
+			c.mu.Unlock()
+			return nil, nil
+		}
+		c.consumerWaiting = true
+		c.mu.Unlock()
+		select {
+		case <-c.ch:
+		case <-c.done:
+			return nil, errors.New("transport closed by client")
+		}
+	}
+}
+
+func (c *controlBuffer) finish() {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return
+	}
+	c.err = ErrConnClosing
+	// There may be headers for streams in the control buffer.
+	// These streams need to be cleaned out since the transport
+	// is not aware of them yet.
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		hdr, ok := head.it.(*headerFrame)
+		if !ok {
+			continue
+		}
+		if hdr.onOrphaned != nil { // It will be nil on the server-side.
+			hdr.onOrphaned(ErrConnClosing)
+		}
+	}
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch, _ := c.trfChan.Load().(chan struct{})
+	if ch != nil {
+		close(ch)
+	}
+	c.trfChan.Store((chan struct{})(nil))
+	c.mu.Unlock()
+}
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames
+// they get added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is map of all established streams that are not cleaned-up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally have a list of data items(and perhaps trailers
+	// on the server-side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+	}
+	return l
+}
+
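+// runLoopySketch is an illustrative sketch (not part of the upstream
+// file) of how both transports drive loopy: construct it with the shared
+// framer and control buffer, run it on a dedicated goroutine, and treat a
+// returned error as the end of the connection.
+func runLoopySketch(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, onErr func(error)) {
+	l := newLoopyWriter(s, fr, cbuf, bdpEst)
+	go func() {
+		if err := l.run(); err != nil {
+			onErr(err) // e.g. close the underlying connection.
+		}
+	}()
+}
+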
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of the run loop, apart from processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in writing of HTTP2 frames into an underlying write buffer.
+// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the processor once,
+// if the batch size is too low, to give stream goroutines a chance to fill it up.
+func (l *loopyWriter) run() (err error) {
+	// Always flush the writer before exiting in case there are pending frames
+	// to be sent.
+	defer l.framer.writer.Flush()
+	for {
+		it, err := l.cbuf.get(true)
+		if err != nil {
+			return err
+		}
+		if err = l.handle(it); err != nil {
+			return err
+		}
+		if _, err = l.processData(); err != nil {
+			return err
+		}
+		gosched := true
+	hasdata:
+		for {
+			it, err := l.cbuf.get(false)
+			if err != nil {
+				return err
+			}
+			if it != nil {
+				if err = l.handle(it); err != nil {
+					return err
+				}
+				if _, err = l.processData(); err != nil {
+					return err
+				}
+				continue hasdata
+			}
+			isEmpty, err := l.processData()
+			if err != nil {
+				return err
+			}
+			if !isEmpty {
+				continue hasdata
+			}
+			if gosched {
+				gosched = false
+				if l.framer.writer.offset < minBatchSize {
+					runtime.Gosched()
+					continue hasdata
+				}
+			}
+			l.framer.writer.Flush()
+			break hasdata
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
+	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
+}
+
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
+	// A stream ID of 0 denotes a connection-level window update.
+	if w.streamID == 0 {
+		l.sendQuota += w.increment
+		return nil
+	}
+	// Find the stream and update it.
+	if str, ok := l.estdStreams[w.streamID]; ok {
+		str.bytesOutStanding -= int(w.increment)
+		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
+			str.state = active
+			l.activeStreams.enqueue(str)
+			return nil
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
+	return l.framer.fr.WriteSettings(s.ss...)
+}
+
+func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
+	if err := l.applySettings(s.ss); err != nil {
+		return err
+	}
+	return l.framer.fr.WriteSettingsAck()
+}
+
+func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	l.estdStreams[h.streamID] = str
+	return nil
+}
+
+func (l *loopyWriter) headerHandler(h *headerFrame) error {
+	if l.side == serverSide {
+		str, ok := l.estdStreams[h.streamID]
+		if !ok {
+			if logger.V(logLevel) {
+				logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			}
+			return nil
+		}
+		// Case 1.A: Server is responding back with headers.
+		if !h.endStream {
+			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+		}
+		// else:  Case 1.B: Server wants to close stream.
+
+		if str.state != empty { // either active or waiting on stream quota.
+			// add it to str's list of items.
+			str.itl.enqueue(h)
+			return nil
+		}
+		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+			return err
+		}
+		return l.cleanupStreamHandler(h.cleanup)
+	}
+	// Case 2: Client wants to originate stream.
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	return l.originateStream(str, h)
+}
+
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
+	// l.draining is set when handling GoAway, in which case we want to avoid
+	// creating new streams.
+	if l.draining {
+		// TODO: provide a better error with the reason we are in draining.
+		hdr.onOrphaned(errStreamDrain)
+		return nil
+	}
+	if err := hdr.initStream(str.id); err != nil {
+		return err
+	}
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+		return err
+	}
+	l.estdStreams[str.id] = str
+	return nil
+}
+
+func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
+	if onWrite != nil {
+		onWrite()
+	}
+	l.hBuf.Reset()
+	for _, f := range hf {
+		if err := l.hEnc.WriteField(f); err != nil {
+			if logger.V(logLevel) {
+				logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+			}
+		}
+	}
+	var (
+		err               error
+		endHeaders, first bool
+	)
+	first = true
+	for !endHeaders {
+		size := l.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			first = false
+			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: l.hBuf.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = l.framer.fr.WriteContinuation(
+				streamID,
+				endHeaders,
+				l.hBuf.Next(size),
+			)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
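+// For illustration (not part of the upstream file): with http2MaxFrameLen
+// of 16384 bytes, a 40000-byte encoded header block leaves writeHeader as
+// one HEADERS frame of 16384 bytes followed by CONTINUATION frames of
+// 16384 and 7232 bytes; only the last frame has END_HEADERS set.
+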
+func (l *loopyWriter) preprocessData(df *dataFrame) error {
+	str, ok := l.estdStreams[df.streamID]
+	if !ok {
+		return nil
+	}
+	// If we got data for a stream it means that
+	// stream was originated and the headers were sent out.
+	str.itl.enqueue(df)
+	if str.state == empty {
+		str.state = active
+		l.activeStreams.enqueue(str)
+	}
+	return nil
+}
+
+func (l *loopyWriter) pingHandler(p *ping) error {
+	if !p.ack {
+		l.bdpEst.timesnap(p.data)
+	}
+	return l.framer.fr.WritePing(p.ack, p.data)
+}
+
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
+	o.resp <- l.sendQuota
+	return nil
+}
+
+func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
+	c.onWrite()
+	if str, ok := l.estdStreams[c.streamID]; ok {
+		// On the server side it could be a trailers-only response or
+		// a RST_STREAM before stream initialization, so the stream might
+		// not be established yet.
+		delete(l.estdStreams, c.streamID)
+		str.deleteSelf()
+	}
+	if c.rst { // If RST_STREAM needs to be sent.
+		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
+			return err
+		}
+	}
+	if l.draining && len(l.estdStreams) == 0 {
+		return errors.New("finished processing active streams while in draining mode")
+	}
+	return nil
+}
+
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+	if l.side == clientSide {
+		return errors.New("earlyAbortStream not handled on client")
+	}
+	// In case the caller forgets to set the http status, default to 200.
+	if eas.httpStatus == 0 {
+		eas.httpStatus = 200
+	}
+	headerFields := []hpack.HeaderField{
+		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+	}
+
+	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+		return err
+	}
+	if eas.rst {
+		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
+	if l.side == clientSide {
+		l.draining = true
+		if len(l.estdStreams) == 0 {
+			return errors.New("received GOAWAY with no active streams")
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) goAwayHandler(g *goAway) error {
+	// Handling of an outgoing GoAway is very specific to the side.
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) closeConnectionHandler() error {
+	// Exit loopyWriter entirely by returning an error here.  This will lead to
+	// the transport closing the connection, and, ultimately, transport
+	// closure.
+	return ErrConnClosing
+}
+
+func (l *loopyWriter) handle(i interface{}) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		return l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *registerStream:
+		return l.registerStreamHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *earlyAbortStream:
+		return l.earlyAbortStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		return l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		return l.outFlowControlSizeRequestHandler(i)
+	case closeConnection:
+		return l.closeConnectionHandler()
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater, make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+	return nil
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota left.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which keeps the grpc-message header, and d, which is the actual data.
+	// As an optimization to keep wire traffic low, data from d is copied in after h so that a single HTTP2
+	// frame can be filled up to the maximum possible frame size.
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+		// The client sends out an empty data frame with endStream = true.
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var buf []byte
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
+	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, len(dataItem.d))
+	if hSize != 0 {
+		if dSize == 0 {
+			buf = dataItem.h
+		} else {
+			// We can add some data to the grpc message header to distribute bytes more equally across frames.
+			// Copy on the stack to avoid generating garbage.
+			var localBuf [http2MaxFrameLen]byte
+			copy(localBuf[:hSize], dataItem.h)
+			copy(localBuf[hSize:], dataItem.d[:dSize])
+			buf = localBuf[:hSize+dSize]
+		}
+	} else {
+		buf = dataItem.d
+	}
+
+	size := hSize + dSize
+
+	// Now that outgoing flow controls are checked, we can replenish str's write quota.
+	str.wq.replenish(size)
+	var endStream bool
+	// Set endStream if this is the last data message on this stream and all of it can be written in this iteration.
+	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+		endStream = true
+	}
+	if dataItem.onEachWrite != nil {
+		dataItem.onEachWrite()
+	}
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+		return false, err
+	}
+	str.bytesOutStanding += size
+	l.sendQuota -= uint32(size)
+	dataItem.h = dataItem.h[hSize:]
+	dataItem.d = dataItem.d[dSize:]
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+		str.itl.dequeue()
+	}
+	if str.itl.isEmpty() {
+		str.state = empty
+	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+			return false, err
+		}
+		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+			return false, err
+		}
+	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+		str.state = waitingOnStreamQuota
+	} else { // Otherwise add it back to the list of active streams.
+		l.activeStreams.enqueue(str)
+	}
+	return false, nil
+}
+
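+// Worked example (not part of the upstream file): with oiws=65535,
+// bytesOutStanding=60000 and sendQuota=20000, the stream quota is
+// 65535-60000=5535, so maxSize=min(16384, 5535, 20000)=5535. For a
+// dataItem with len(h)=10 and len(d)=8000, processData sends hSize=10
+// header bytes plus dSize=5525 data bytes as one 5535-byte DATA frame and
+// leaves the remaining 2475 data bytes queued for a later iteration.
+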
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 000000000..bc8ee0747
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"math"
+	"time"
+)
+
+const (
+	// The default value of the flow control window size in the HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = 20 * time.Second
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
+	// The max window limit set by the HTTP2 spec.
+	maxWindowSize = math.MaxInt32
+	// defaultWriteQuota is the default value for the number of data
+	// bytes that each stream can schedule before some of it is
+	// flushed out.
+	defaultWriteQuota              = 64 * 1024
+	defaultClientMaxHeaderListSize = uint32(16 << 20)
+	defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
+
+// MaxStreamID is the upper bound for the stream ID before the current
+// transport gracefully closes and a new transport is created for subsequent RPCs.
+// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
+// integer. It's exported so that tests can override it.
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
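+
+// For illustration (not part of the upstream file): math.MaxInt32*3/4
+// evaluates to 1610612735, so a client transport is retired and replaced
+// once its (odd-numbered, client-initiated) stream IDs approach that bound.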
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 000000000..97198c515
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get waits on a read from ch when quota goes less than or equal to zero.
+	// replenish writes on it when quota goes positive again.
+	ch chan struct{}
+	// done is triggered in the error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
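+// exampleWriteQuota is an illustrative sketch (not part of the upstream
+// file): a sender reserves quota with get before scheduling data, and
+// loopy hands the quota back with replenish once the bytes are written
+// out, waking any blocked sender through the ch signal.
+func exampleWriteQuota(done chan struct{}) error {
+	wq := newWriteQuota(defaultWriteQuota, done)
+	if err := wq.get(1024); err != nil { // Blocks while quota is exhausted.
+		return err
+	}
+	// ... hand the 1024 bytes to loopy ...
+	wq.replenish(1024) // Invoked by loopy after the bytes hit the wire.
+	return nil
+}
+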
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
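+// Worked example (not part of the upstream file): with limit=65535,
+// onData returns 0 and merely accumulates unacked bytes until they reach
+// limit/4=16383; the call that crosses that line returns the accumulated
+// total as a single transport-level WINDOW_UPDATE, batching updates
+// instead of acknowledging every DATA frame individually.
+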
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control.
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which has been received but not yet
+	// consumed by the application.
+	pendingData uint32
+	// pendingUpdate is the amount of data the application has consumed but grpc
+	// has not yet sent a window update for. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by the receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) {
+	f.mu.Lock()
+	f.limit = n
+	f.mu.Unlock()
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// The sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; we will fall back on the currently available window (at least 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
+
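+// Worked example (not part of the upstream file): with limit=65535 and no
+// pending data or updates, estSenderQuota is 65535. If the application
+// asks to read a 100000-byte message, estUntransmittedData (100000)
+// exceeds estSenderQuota, so maybeAdjust returns a delta of 100000 and the
+// sender can push the whole message without stalling on the normal window.
+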
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+	f.mu.Lock()
+	f.pendingData += n
+	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
+		limit := f.limit
+		rcvd := f.pendingData + f.pendingUpdate
+		f.mu.Unlock()
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
+	}
+	f.mu.Unlock()
+	return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+	f.mu.Lock()
+	if f.pendingData == 0 {
+		f.mu.Unlock()
+		return 0
+	}
+	f.pendingData -= n
+	if n > f.delta {
+		n -= f.delta
+		f.delta = 0
+	} else {
+		f.delta -= n
+		n = 0
+	}
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		wu := f.pendingUpdate
+		f.pendingUpdate = 0
+		f.mu.Unlock()
+		return wu
+	}
+	f.mu.Unlock()
+	return 0
+}
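+
+// Continuing the example above (not part of the upstream file): as the
+// application consumes those bytes, onRead charges them against delta
+// first and only counts the remainder toward pendingUpdate, so the extra
+// window granted by maybeAdjust is not announced a second time as a
+// regular WINDOW_UPDATE.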
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
new file mode 100644
index 000000000..e6626bf96
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -0,0 +1,477 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file is the implementation of a gRPC server using HTTP/2 which
+// uses the standard Go http2 Server implementation (via the
+// http.Handler interface), rather than speaking low-level HTTP/2
+// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
+
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
+// It requires that the http Server supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
+	if r.ProtoMajor != 2 {
+		msg := "gRPC requires HTTP/2"
+		http.Error(w, msg, http.StatusBadRequest)
+		return nil, errors.New(msg)
+	}
+	if r.Method != "POST" {
+		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
+		http.Error(w, msg, http.StatusBadRequest)
+		return nil, errors.New(msg)
+	}
+	contentType := r.Header.Get("Content-Type")
+	// TODO: do we assume contentType is lowercase? we did before
+	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
+	if !validContentType {
+		msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
+		http.Error(w, msg, http.StatusUnsupportedMediaType)
+		return nil, errors.New(msg)
+	}
+	if _, ok := w.(http.Flusher); !ok {
+		msg := "gRPC requires a ResponseWriter supporting http.Flusher"
+		http.Error(w, msg, http.StatusInternalServerError)
+		return nil, errors.New(msg)
+	}
+
+	st := &serverHandlerTransport{
+		rw:             w,
+		req:            r,
+		closedCh:       make(chan struct{}),
+		writes:         make(chan func()),
+		contentType:    contentType,
+		contentSubtype: contentSubtype,
+		stats:          stats,
+	}
+
+	if v := r.Header.Get("grpc-timeout"); v != "" {
+		to, err := decodeTimeout(v)
+		if err != nil {
+			msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
+			http.Error(w, msg, http.StatusBadRequest)
+			return nil, status.Error(codes.Internal, msg)
+		}
+		st.timeoutSet = true
+		st.timeout = to
+	}
+
+	metakv := []string{"content-type", contentType}
+	if r.Host != "" {
+		metakv = append(metakv, ":authority", r.Host)
+	}
+	for k, vv := range r.Header {
+		k = strings.ToLower(k)
+		if isReservedHeader(k) && !isWhitelistedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			v, err := decodeMetadataHeader(k, v)
+			if err != nil {
+				msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
+				http.Error(w, msg, http.StatusBadRequest)
+				return nil, status.Error(codes.Internal, msg)
+			}
+			metakv = append(metakv, k, v)
+		}
+	}
+	st.headerMD = metadata.Pairs(metakv...)
+
+	return st, nil
+}
+
+// serverHandlerTransport is an implementation of ServerTransport
+// which replies to exactly one gRPC request (exactly one HTTP request),
+// using the net/http.Handler interface. This http.Handler is guaranteed
+// at this point to be speaking over HTTP/2, so it's able to speak valid
+// gRPC.
+type serverHandlerTransport struct {
+	rw         http.ResponseWriter
+	req        *http.Request
+	timeoutSet bool
+	timeout    time.Duration
+
+	headerMD metadata.MD
+
+	closeOnce sync.Once
+	closedCh  chan struct{} // closed on Close
+
+	// writes is a channel of code to run serialized in the
+	// ServeHTTP (HandleStreams) goroutine. The channel is closed
+	// when WriteStatus is called.
+	writes chan func()
+
+	// block concurrent WriteStatus calls
+	// e.g. grpc/(*serverStream).SendMsg/RecvMsg
+	writeStatusMu sync.Mutex
+
+	// we just mirror the request content-type
+	contentType string
+	// we store both contentType and contentSubtype so we don't keep recreating them
+	// TODO make sure this is consistent across handler_server and http2_server
+	contentSubtype string
+
+	stats []stats.Handler
+}
+
+func (ht *serverHandlerTransport) Close(err error) {
+	ht.closeOnce.Do(func() {
+		if logger.V(logLevel) {
+			logger.Infof("Closing serverHandlerTransport: %v", err)
+		}
+		close(ht.closedCh)
+	})
+}
+
+func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+
+// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
+// the empty string if unknown.
+type strAddr string
+
+func (a strAddr) Network() string {
+	if a != "" {
+		// Per the documentation on net/http.Request.RemoteAddr, if this is
+		// set, it's set to the IP:port of the peer (hence, TCP):
+		// https://golang.org/pkg/net/http/#Request
+		//
+		// If we want to support Unix sockets later, we can
+		// add our own grpc-specific convention within the
+		// grpc codebase to set RemoteAddr to a different
+		// format, or probably better: we can attach it to the
+		// context and use that from serverHandlerTransport.RemoteAddr.
+		return "tcp"
+	}
+	return ""
+}
+
+func (a strAddr) String() string { return string(a) }
+
+// do runs fn in the ServeHTTP goroutine.
+func (ht *serverHandlerTransport) do(fn func()) error {
+	select {
+	case <-ht.closedCh:
+		return ErrConnClosing
+	case ht.writes <- fn:
+		return nil
+	}
+}
+
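+// Illustrative note (not part of the upstream file): do is what makes the
+// transport safe for concurrent use. Write, WriteHeader and WriteStatus
+// may be invoked from any goroutine, but the closures they pass to do all
+// execute serially on the single runStream loop below, so the underlying
+// http.ResponseWriter is only ever touched from one goroutine.
+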
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+	ht.writeStatusMu.Lock()
+	defer ht.writeStatusMu.Unlock()
+
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+
+		// And flush, in case no header or body has been sent yet.
+		// This forces a separation of headers and trailers if this is the
+		// first call (for example, in end2end tests' TestNoService).
+		ht.rw.(http.Flusher).Flush()
+
+		h := ht.rw.Header()
+		h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
+		if m := st.Message(); m != "" {
+			h.Set("Grpc-Message", encodeGrpcMessage(m))
+		}
+
+		if p := st.Proto(); p != nil && len(p.Details) > 0 {
+			stBytes, err := proto.Marshal(p)
+			if err != nil {
+				// TODO: return error instead, when callers are able to handle it.
+				panic(err)
+			}
+
+			h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+		}
+
+		if md := s.Trailer(); len(md) > 0 {
+			for k, vv := range md {
+				// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+				if isReservedHeader(k) {
+					continue
+				}
+				for _, v := range vv {
+					// http2 ResponseWriter mechanism to send undeclared Trailers after
+					// the headers have possibly been written.
+					h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
+				}
+			}
+		}
+	})
+
+	if err == nil { // transport has not been closed
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		for _, sh := range ht.stats {
+			sh.HandleRPC(s.Context(), &stats.OutTrailer{
+				Trailer: s.trailer.Copy(),
+			})
+		}
+	}
+	ht.Close(errors.New("finished writing status"))
+	return err
+}
+
+// writePendingHeaders sets common and custom headers on the first
+// write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
+	ht.writeCommonHeaders(s)
+	ht.writeCustomHeaders(s)
+}
+
+// writeCommonHeaders sets common headers on the first write
+// call (Write, WriteHeader, or WriteStatus).
+func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
+	h := ht.rw.Header()
+	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
+	h.Set("Content-Type", ht.contentType)
+
+	// Predeclare trailers we'll set later in WriteStatus (after the body).
+	// This is a SHOULD in the HTTP RFC, and the way you add (known)
+	// Trailers per the net/http.ResponseWriter contract.
+	// See https://golang.org/pkg/net/http/#ResponseWriter
+	// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+	h.Add("Trailer", "Grpc-Status")
+	h.Add("Trailer", "Grpc-Message")
+	h.Add("Trailer", "Grpc-Status-Details-Bin")
+
+	if s.sendCompress != "" {
+		h.Set("Grpc-Encoding", s.sendCompress)
+	}
+}
+
+// writeCustomHeaders sets custom headers set on the stream via SetHeader
+// on the first write call (Write, WriteHeader, or WriteStatus).
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
+	h := ht.rw.Header()
+
+	s.hdrMu.Lock()
+	for k, vv := range s.header {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			h.Add(k, encodeMetadataHeader(k, v))
+		}
+	}
+
+	s.hdrMu.Unlock()
+}
+
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	headersWritten := s.updateHeaderSent()
+	return ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+		ht.rw.Write(hdr)
+		ht.rw.Write(data)
+		ht.rw.(http.Flusher).Flush()
+	})
+}
+
+func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+	if err := s.SetHeader(md); err != nil {
+		return err
+	}
+
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+
+		ht.rw.WriteHeader(200)
+		ht.rw.(http.Flusher).Flush()
+	})
+
+	if err == nil {
+		for _, sh := range ht.stats {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			sh.HandleRPC(s.Context(), &stats.OutHeader{
+				Header:      md.Copy(),
+				Compression: s.sendCompress,
+			})
+		}
+	}
+	return err
+}
+
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+	// With this transport type there will be exactly 1 stream: this HTTP request.
+
+	ctx := ht.req.Context()
+	var cancel context.CancelFunc
+	if ht.timeoutSet {
+		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
+	} else {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+
+	// requestOver is closed when the status has been written via WriteStatus.
+	requestOver := make(chan struct{})
+	go func() {
+		select {
+		case <-requestOver:
+		case <-ht.closedCh:
+		case <-ht.req.Context().Done():
+		}
+		cancel()
+		ht.Close(errors.New("request is done processing"))
+	}()
+
+	req := ht.req
+
+	s := &Stream{
+		id:             0, // irrelevant
+		requestRead:    func(int) {},
+		cancel:         cancel,
+		buf:            newRecvBuffer(),
+		st:             ht,
+		method:         req.URL.Path,
+		recvCompress:   req.Header.Get("grpc-encoding"),
+		contentSubtype: ht.contentSubtype,
+	}
+	pr := &peer.Peer{
+		Addr: ht.RemoteAddr(),
+	}
+	if req.TLS != nil {
+		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+	}
+	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
+	s.ctx = peer.NewContext(ctx, pr)
+	for _, sh := range ht.stats {
+		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+		inHeader := &stats.InHeader{
+			FullMethod:  s.method,
+			RemoteAddr:  ht.RemoteAddr(),
+			Compression: s.recvCompress,
+		}
+		sh.HandleRPC(s.ctx, inHeader)
+	}
+	s.trReader = &transportReader{
+		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+		windowHandler: func(int) {},
+	}
+
+	// readerDone is closed when the Body.Read-ing goroutine exits.
+	readerDone := make(chan struct{})
+	go func() {
+		defer close(readerDone)
+
+		// TODO: minimize garbage, optimize recvBuffer code/ownership
+		const readSize = 8196
+		for buf := make([]byte, readSize); ; {
+			n, err := req.Body.Read(buf)
+			if n > 0 {
+				s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
+				buf = buf[n:]
+			}
+			if err != nil {
+				s.buf.put(recvMsg{err: mapRecvMsgError(err)})
+				return
+			}
+			if len(buf) == 0 {
+				buf = make([]byte, readSize)
+			}
+		}
+	}()
+
+	// startStream is provided by the *grpc.Server's serveStreams.
+	// It starts a goroutine serving s and exits immediately.
+	// The goroutine that is started is the one that then calls
+	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+	startStream(s)
+
+	ht.runStream()
+	close(requestOver)
+
+	// Wait for reading goroutine to finish.
+	req.Body.Close()
+	<-readerDone
+}
+
+func (ht *serverHandlerTransport) runStream() {
+	for {
+		select {
+		case fn := <-ht.writes:
+			fn()
+		case <-ht.closedCh:
+			return
+		}
+	}
+}
+
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
+func (ht *serverHandlerTransport) Drain() {
+	panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError maps the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+//   - io.EOF
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return status.Error(code, se.Error())
+		}
+	}
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 000000000..79ee8aea0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1800 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+	lastRead  int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx       context.Context
+	cancel    context.CancelFunc
+	ctxDone   <-chan struct{} // Cache the ctx.Done() chan.
+	userAgent string
+	// address contains the resolver returned address for this transport.
+	// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
+	// passed to `NewStream`, when determining the :authority header.
+	address    resolver.Address
+	md         metadata.MD
+	conn       net.Conn // underlying communication channel
+	loopy      *loopyWriter
+	remoteAddr net.Addr
+	localAddr  net.Addr
+	authInfo   credentials.AuthInfo // auth info about the connection
+
+	readerDone chan struct{} // sync point to enable testing.
+	writerDone chan struct{} // sync point to enable testing.
+	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+	// that the server sent GoAway on this transport.
+	goAway chan struct{}
+
+	framer *framer
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	// Do not access controlBuf with mu held.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	// The scheme used: https if TLS is on, http otherwise.
+	scheme string
+
+	isSecure bool
+
+	perRPCCreds []credentials.PerRPCCredentials
+
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
+
+	statsHandlers []stats.Handler
+
+	initialWindowSize int32
+
+	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+	maxSendHeaderListSize *uint32
+
+	bdpEst *bdpEstimator
+
+	maxConcurrentStreams  uint32
+	streamQuota           int64
+	streamsQuotaAvailable chan struct{}
+	waitingStreams        uint32
+	nextID                uint32
+	registeredCompressors string
+
+	// Do not access controlBuf with mu held.
+	mu            sync.Mutex // guard the following variables
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
+	prevGoAwayID uint32
+	// goAwayReason records the http2.ErrCode and debug data received with the
+	// GoAway frame.
+	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same mutex for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
+
+	// Fields below are for channelz metric collection.
+	channelzID *channelz.Identifier
+	czData     *channelzData
+
+	onClose func(GoAwayReason)
+
+	bufferPool *bufferPool
+
+	connectionID uint64
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
+	if fn != nil {
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, users used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+			// Supported unix targets are either "unix://absolute-path" or
+			// "unix:relative-path".
+			if filepath.IsAbs(address) {
+				return fn(ctx, "unix://"+address)
+			}
+			return fn(ctx, "unix:"+address)
+		}
+		return fn(ctx, address)
+	}
+	if !ok {
+		networkType, address = parseDialTarget(address)
+	}
+	if networkType == "tcp" && useProxy {
+		return proxyDial(ctx, address, grpcUA)
+	}
+	return (&net.Dialer{}).DialContext(ctx, networkType, address)
+}
+
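+// For illustration (not part of the upstream file): with a custom dialer,
+// a unix-network address "/tmp/sock" reaches the dialer as
+// "unix:///tmp/sock" and "relative/sock" as "unix:relative/sock", while
+// abstract-socket addresses (leading NUL byte) and all other network
+// types are passed through unchanged.
+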
+func isTemporary(err error) bool {
+	switch err := err.(type) {
+	case interface {
+		Temporary() bool
+	}:
+		return err.Temporary()
+	case interface {
+		Timeout() bool
+	}:
+		// Timeouts may be resolved upon retry, and are thus treated as
+		// temporary.
+		return err.Timeout()
+	}
+	return true
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. A non-nil error is returned if
+// construction fails.
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
+	scheme := "http"
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	// gRPC, resolver, balancer etc. can specify arbitrary data in the
+	// Attributes field of resolver.Address, which is shoved into connectCtx
+	// and passed to the dialer and credential handshaker. This makes it possible for
+	// address-specific arbitrary data to reach custom dialers and credential handshakers.
+	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
+	if err != nil {
+		if opts.FailOnNonTempDialError {
+			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
+		}
+		return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
+	}
+
+	// Any further errors will close the underlying connection
+	defer func(conn net.Conn) {
+		if err != nil {
+			conn.Close()
+		}
+	}(conn)
+
+	// The following defer and goroutine monitor the connectCtx for cancelation
+	// and deadline.  On context expiration, the connection is hard closed and
+	// this function will naturally fail as a result.  Otherwise, the defer
+	// waits for the goroutine to exit to prevent the context from being
+	// monitored (and to prevent the connection from ever being closed) after
+	// returning from this function.
+	ctxMonitorDone := grpcsync.NewEvent()
+	newClientCtx, newClientDone := context.WithCancel(connectCtx)
+	defer func() {
+		newClientDone()         // Awaken the goroutine below if connectCtx hasn't expired.
+		<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
+	}()
+	go func(conn net.Conn) {
+		defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
+		<-newClientCtx.Done()       // Block until connectCtx expires or the defer above executes.
+		if err := connectCtx.Err(); err != nil {
+			// connectCtx expired before exiting the function.  Hard close the connection.
+			if logger.V(logLevel) {
+				logger.Infof("newClientTransport: aborting due to connectCtx: %v", err)
+			}
+			conn.Close()
+		}
+	}(conn)
+
+	kp := opts.KeepaliveParams
+	// Validate keepalive parameters.
+	if kp.Time == 0 {
+		kp.Time = defaultClientKeepaliveTime
+	}
+	if kp.Timeout == 0 {
+		kp.Timeout = defaultClientKeepaliveTimeout
+	}
+	keepaliveEnabled := false
+	if kp.Time != infinity {
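+		// TCP_USER_TIMEOUT bounds how long written data may remain
+		// unacknowledged before the kernel drops the connection, keeping the
+		// socket's failure detection in line with the keepalive timeout.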
+		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+		}
+		keepaliveEnabled = true
+	}
+	var (
+		isSecure bool
+		authInfo credentials.AuthInfo
+	)
+	transportCreds := opts.TransportCredentials
+	perRPCCreds := opts.PerRPCCredentials
+
+	if b := opts.CredsBundle; b != nil {
+		if t := b.TransportCredentials(); t != nil {
+			transportCreds = t
+		}
+		if t := b.PerRPCCredentials(); t != nil {
+			perRPCCreds = append(perRPCCreds, t)
+		}
+	}
+	if transportCreds != nil {
+		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
+		if err != nil {
+			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
+		}
+		for _, cd := range perRPCCreds {
+			if cd.RequireTransportSecurity() {
+				if ci, ok := authInfo.(interface {
+					GetCommonAuthInfo() credentials.CommonAuthInfo
+				}); ok {
+					secLevel := ci.GetCommonAuthInfo().SecurityLevel
+					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+					}
+				}
+			}
+		}
+		isSecure = true
+		if transportCreds.Info().SecurityProtocol == "tls" {
+			scheme = "https"
+		}
+	}
+	dynamicWindow := true
+	icwz := int32(initialWindowSize)
+	if opts.InitialConnWindowSize >= defaultWindowSize {
+		icwz = opts.InitialConnWindowSize
+		dynamicWindow = false
+	}
+	writeBufSize := opts.WriteBufferSize
+	readBufSize := opts.ReadBufferSize
+	maxHeaderListSize := defaultClientMaxHeaderListSize
+	if opts.MaxHeaderListSize != nil {
+		maxHeaderListSize = *opts.MaxHeaderListSize
+	}
+	t := &http2Client{
+		ctx:                   ctx,
+		ctxDone:               ctx.Done(), // Cache Done chan.
+		cancel:                cancel,
+		userAgent:             opts.UserAgent,
+		registeredCompressors: grpcutil.RegisteredCompressors(),
+		address:               addr,
+		conn:                  conn,
+		remoteAddr:            conn.RemoteAddr(),
+		localAddr:             conn.LocalAddr(),
+		authInfo:              authInfo,
+		readerDone:            make(chan struct{}),
+		writerDone:            make(chan struct{}),
+		goAway:                make(chan struct{}),
+		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
+		fc:                    &trInFlow{limit: uint32(icwz)},
+		scheme:                scheme,
+		activeStreams:         make(map[uint32]*Stream),
+		isSecure:              isSecure,
+		perRPCCreds:           perRPCCreds,
+		kp:                    kp,
+		statsHandlers:         opts.StatsHandlers,
+		initialWindowSize:     initialWindowSize,
+		nextID:                1,
+		maxConcurrentStreams:  defaultMaxStreamsClient,
+		streamQuota:           defaultMaxStreamsClient,
+		streamsQuotaAvailable: make(chan struct{}, 1),
+		czData:                new(channelzData),
+		keepaliveEnabled:      keepaliveEnabled,
+		bufferPool:            newBufferPool(),
+		onClose:               onClose,
+	}
+	// Add peer information to the http2Client context.
+	t.ctx = peer.NewContext(t.ctx, t.getPeer())
+
+	if md, ok := addr.Metadata.(*metadata.MD); ok {
+		t.md = *md
+	} else if md := imetadata.Get(addr); md != nil {
+		t.md = md
+	}
+	t.controlBuf = newControlBuffer(t.ctxDone)
+	if opts.InitialWindowSize >= defaultWindowSize {
+		t.initialWindowSize = opts.InitialWindowSize
+		dynamicWindow = false
+	}
+	if dynamicWindow {
+		t.bdpEst = &bdpEstimator{
+			bdp:               initialWindowSize,
+			updateFlowControl: t.updateFlowControl,
+		}
+	}
+	for _, sh := range t.statsHandlers {
+		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+			RemoteAddr: t.remoteAddr,
+			LocalAddr:  t.localAddr,
+		})
+		connBegin := &stats.ConnBegin{
+			Client: true,
+		}
+		sh.HandleConn(t.ctx, connBegin)
+	}
+	t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+	if err != nil {
+		return nil, err
+	}
+	if t.keepaliveEnabled {
+		t.kpDormancyCond = sync.NewCond(&t.mu)
+		go t.keepalive()
+	}
+
+	// Start the reader goroutine for incoming messages. Each transport has a
+	// dedicated goroutine which reads HTTP2 frames from the network. Then it
+	// dispatches the frame to the corresponding stream entity.  When the
+	// server preface is received, readerErrCh is closed.  If an error occurs
+	// first, an error is pushed to the channel.  This must be checked before
+	// returning from this function.
+	readerErrCh := make(chan error, 1)
+	go t.reader(readerErrCh)
+	defer func() {
+		if err == nil {
+			err = <-readerErrCh
+		}
+		if err != nil {
+			t.Close(err)
+		}
+	}()
+
+	// Send connection preface to server.
+	n, err := t.conn.Write(clientPreface)
+	if err != nil {
+		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+		return nil, err
+	}
+	if n != len(clientPreface) {
+		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+		return nil, err
+	}
+	var ss []http2.Setting
+
+	if t.initialWindowSize != defaultWindowSize {
+		ss = append(ss, http2.Setting{
+			ID:  http2.SettingInitialWindowSize,
+			Val: uint32(t.initialWindowSize),
+		})
+	}
+	if opts.MaxHeaderListSize != nil {
+		ss = append(ss, http2.Setting{
+			ID:  http2.SettingMaxHeaderListSize,
+			Val: *opts.MaxHeaderListSize,
+		})
+	}
+	err = t.framer.fr.WriteSettings(ss...)
+	if err != nil {
+		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+		return nil, err
+	}
+	// Adjust the connection flow control window if needed.
+	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
+			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+			return nil, err
+		}
+	}
+
+	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
+	if err := t.framer.writer.Flush(); err != nil {
+		return nil, err
+	}
+	go func() {
+		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
+		err := t.loopy.run()
+		if logger.V(logLevel) {
+			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
+		}
+		// Do not close the transport.  Let reader goroutine handle it since
+		// there might be data in the buffers.
+		t.conn.Close()
+		t.controlBuf.finish()
+		close(t.writerDone)
+	}()
+	return t, nil
+}
+
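+// newStream constructs a Stream for callHdr, wiring its receive buffer,
+// write quota, and flow control callbacks to the transport.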
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
+	s := &Stream{
+		ct:             t,
+		done:           make(chan struct{}),
+		method:         callHdr.Method,
+		sendCompress:   callHdr.SendCompress,
+		buf:            newRecvBuffer(),
+		headerChan:     make(chan struct{}),
+		contentSubtype: callHdr.ContentSubtype,
+		doneFunc:       callHdr.DoneFunc,
+	}
+	s.wq = newWriteQuota(defaultWriteQuota, s.done)
+	s.requestRead = func(n int) {
+		t.adjustWindow(s, uint32(n))
+	}
+	// The client-side stream context should have exactly the same lifecycle
+	// as the user-provided context. That means s.ctx should be read-only,
+	// and s.ctx is done iff ctx is done. So we use the original context here
+	// instead of creating a copy.
+	s.ctx = ctx
+	s.trReader = &transportReader{
+		reader: &recvBufferReader{
+			ctx:     s.ctx,
+			ctxDone: s.ctx.Done(),
+			recv:    s.buf,
+			closeStream: func(err error) {
+				t.CloseStream(s, err)
+			},
+			freeBuffer: t.bufferPool.put,
+		},
+		windowHandler: func(n int) {
+			t.updateWindow(s, uint32(n))
+		},
+	}
+	return s
+}
+
+func (t *http2Client) getPeer() *peer.Peer {
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo, // Can be nil
+	}
+}
+
+func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
+	aud := t.createAudience(callHdr)
+	ri := credentials.RequestInfo{
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
+	}
+	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
+	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
+	if err != nil {
+		return nil, err
+	}
+	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(mmukhi): Benchmark whether performance improves if we count the
+	// metadata and other header fields first and create a slice of exactly
+	// that size. For now, make the slice a predictable size to reduce
+	// allocations made by append.
+	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+	hfLen += len(authData) + len(callAuthData)
+	headerFields := make([]hpack.HeaderField, 0, hfLen)
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+	if callHdr.PreviousAttempts > 0 {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+	}
+
+	registeredCompressors := t.registeredCompressors
+	if callHdr.SendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+		// Include the outgoing compressor name when compressor is not registered
+		// via encoding.RegisterCompressor. This is possible when client uses
+		// WithCompressor dial option.
+		if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
+			if registeredCompressors != "" {
+				registeredCompressors += ","
+			}
+			registeredCompressors += callHdr.SendCompress
+		}
+	}
+
+	if registeredCompressors != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
+	}
+	if dl, ok := ctx.Deadline(); ok {
+		// Send out the timeout regardless of its value. The server can detect
+		// a timeout context by itself.
+		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
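+		// For example, a deadline 100ms away encodes as "100000u" (100000
+		// microseconds); the grpc-timeout header value is an integer followed
+		// by one of the unit suffixes n, u, m, S, M, or H.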
+		timeout := time.Until(dl)
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
+	}
+	for k, v := range authData {
+		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+	}
+	for k, v := range callAuthData {
+		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+	}
+	if b := stats.OutgoingTags(ctx); b != nil {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+	}
+	if b := stats.OutgoingTrace(ctx); b != nil {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+	}
+
+	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+		var k string
+		for k, vv := range md {
+			// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set.
+			if isReservedHeader(k) {
+				continue
+			}
+			for _, v := range vv {
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+			}
+		}
+		for _, vv := range added {
+			for i, v := range vv {
+				if i%2 == 0 {
+					k = strings.ToLower(v)
+					continue
+				}
+				// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set.
+				if isReservedHeader(k) {
+					continue
+				}
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+			}
+		}
+	}
+	for k, vv := range t.md {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields, nil
+}
+
+func (t *http2Client) createAudience(callHdr *CallHdr) string {
+	// Create an audience string only if needed.
+	if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
+		return ""
+	}
+	// Construct URI required to get auth request metadata.
+	// Omit port if it is the default one.
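+	// For example, Host "foo.example.com:443" with Method
+	// "/pkg.Service/Method" yields the audience
+	// "https://foo.example.com/pkg.Service".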
+	host := strings.TrimSuffix(callHdr.Host, ":443")
+	pos := strings.LastIndex(callHdr.Method, "/")
+	if pos == -1 {
+		pos = len(callHdr.Method)
+	}
+	return "https://" + host + callHdr.Method[:pos]
+}
+
+func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
+	if len(t.perRPCCreds) == 0 {
+		return nil, nil
+	}
+	authData := map[string]string{}
+	for _, c := range t.perRPCCreds {
+		data, err := c.GetRequestMetadata(ctx, audience)
+		if err != nil {
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
+				return nil, err
+			}
+
+			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
+		}
+		for k, v := range data {
+			// Capital header names are illegal in HTTP/2.
+			k = strings.ToLower(k)
+			authData[k] = v
+		}
+	}
+	return authData, nil
+}
+
+func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
+	var callAuthData map[string]string
+	// Check if credentials.PerRPCCredentials were provided via call options.
+	// Note: if these credentials are provided both via dial options and call
+	// options, then both sets of credentials will be applied.
+	if callCreds := callHdr.Creds; callCreds != nil {
+		if callCreds.RequireTransportSecurity() {
+			ri, _ := credentials.RequestInfoFromContext(ctx)
+			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+			}
+		}
+		data, err := callCreds.GetRequestMetadata(ctx, audience)
+		if err != nil {
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
+				return nil, err
+			}
+			return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
+		}
+		callAuthData = make(map[string]string, len(data))
+		for k, v := range data {
+			// Capital header names are illegal in HTTP/2.
+			k = strings.ToLower(k)
+			callAuthData[k] = v
+		}
+	}
+	return callAuthData, nil
+}
+
+// NewStreamError wraps an error and reports additional information.  Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire.  However, there are two notable exceptions:
+//
+//  1. If the stream headers violate the max header list size allowed by the
+//     server.  It's possible this could succeed on another transport, even if
+//     it's unlikely, but do not transparently retry.
+//  2. If the credentials errored when requesting their headers.  In this case,
+//     it's possible a retry can fix the problem, but indefinitely transparently
+//     retrying is not appropriate as it is likely the credentials, if they can
+//     eventually succeed, would need I/O to do so.
+type NewStreamError struct {
+	Err error
+
+	AllowTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+	return e.Err.Error()
+}
+
+// NewStream creates a stream and registers it into the transport as "active"
+// streams.  All non-nil errors returned will be *NewStreamError.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
+	ctx = peer.NewContext(ctx, t.getPeer())
+
+	// ServerName field of the resolver returned address takes precedence over
+	// Host field of CallHdr to determine the :authority header. This is because,
+	// the ServerName field takes precedence for server authentication during
+	// TLS handshake, and the :authority header should match the value used
+	// for server authentication.
+	if t.address.ServerName != "" {
+		newCallHdr := *callHdr
+		newCallHdr.Host = t.address.ServerName
+		callHdr = &newCallHdr
+	}
+
+	headerFields, err := t.createHeaderFields(ctx, callHdr)
+	if err != nil {
+		return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
+	}
+	s := t.newStream(ctx, callHdr)
+	cleanup := func(err error) {
+		if s.swapState(streamDone) == streamDone {
+			// If it was already done, return.
+			return
+		}
+		// The stream was unprocessed by the server.
+		atomic.StoreUint32(&s.unprocessed, 1)
+		s.write(recvMsg{err: err})
+		close(s.done)
+		// If headerChan isn't closed, then close it.
+		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+			close(s.headerChan)
+		}
+	}
+	hdr := &headerFrame{
+		hf:        headerFields,
+		endStream: false,
+		initStream: func(id uint32) error {
+			t.mu.Lock()
+			// TODO: handle transport closure in loopy instead and remove this.
+			// initStream is never called when the transport is draining.
+			if t.state == closing {
+				t.mu.Unlock()
+				cleanup(ErrConnClosing)
+				return ErrConnClosing
+			}
+			if channelz.IsOn() {
+				atomic.AddInt64(&t.czData.streamsStarted, 1)
+				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+			}
+			// If the keepalive goroutine has gone dormant, wake it up.
+			if t.kpDormant {
+				t.kpDormancyCond.Signal()
+			}
+			t.mu.Unlock()
+			return nil
+		},
+		onOrphaned: cleanup,
+		wq:         s.wq,
+	}
+	firstTry := true
+	var ch chan struct{}
+	transportDrainRequired := false
+	checkForStreamQuota := func(it interface{}) bool {
+		if t.streamQuota <= 0 { // Can go negative if server decreases it.
+			if firstTry {
+				t.waitingStreams++
+			}
+			ch = t.streamsQuotaAvailable
+			return false
+		}
+		if !firstTry {
+			t.waitingStreams--
+		}
+		t.streamQuota--
+		h := it.(*headerFrame)
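+		// Client-initiated stream IDs are odd (1, 3, 5, ...), so the next ID
+		// is allocated by advancing two at a time.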
+		h.streamID = t.nextID
+		t.nextID += 2
+
+		// Drain client transport if nextID > MaxStreamID which signals gRPC that
+		// the connection is closed and a new one must be created for subsequent RPCs.
+		transportDrainRequired = t.nextID > MaxStreamID
+
+		s.id = h.streamID
+		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+		t.mu.Lock()
+		if t.activeStreams == nil { // Can be niled from Close().
+			t.mu.Unlock()
+			return false // Don't create a stream if the transport is already closed.
+		}
+		t.activeStreams[s.id] = s
+		t.mu.Unlock()
+		if t.streamQuota > 0 && t.waitingStreams > 0 {
+			select {
+			case t.streamsQuotaAvailable <- struct{}{}:
+			default:
+			}
+		}
+		return true
+	}
+	var hdrListSizeErr error
+	checkForHeaderListSize := func(it interface{}) bool {
+		if t.maxSendHeaderListSize == nil {
+			return true
+		}
+		hdrFrame := it.(*headerFrame)
+		var sz int64
+		for _, f := range hdrFrame.hf {
+			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
+				return false
+			}
+		}
+		return true
+	}
+	for {
+		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
+			return checkForHeaderListSize(it) && checkForStreamQuota(it)
+		}, hdr)
+		if err != nil {
+			// Connection closed.
+			return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
+		}
+		if success {
+			break
+		}
+		if hdrListSizeErr != nil {
+			return nil, &NewStreamError{Err: hdrListSizeErr}
+		}
+		firstTry = false
+		select {
+		case <-ch:
+		case <-ctx.Done():
+			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
+		case <-t.goAway:
+			return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
+		case <-t.ctx.Done():
+			return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
+		}
+	}
+	if len(t.statsHandlers) != 0 {
+		header, ok := metadata.FromOutgoingContext(ctx)
+		if ok {
+			header.Set("user-agent", t.userAgent)
+		} else {
+			header = metadata.Pairs("user-agent", t.userAgent)
+		}
+		for _, sh := range t.statsHandlers {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			// Note: Creating a new stats object to prevent pollution.
+			outHeader := &stats.OutHeader{
+				Client:      true,
+				FullMethod:  callHdr.Method,
+				RemoteAddr:  t.remoteAddr,
+				LocalAddr:   t.localAddr,
+				Compression: callHdr.SendCompress,
+				Header:      header,
+			}
+			sh.HandleRPC(s.ctx, outHeader)
+		}
+	}
+	if transportDrainRequired {
+		if logger.V(logLevel) {
+			logger.Infof("transport: t.nextID > MaxStreamID. Draining")
+		}
+		t.GracefulClose()
+	}
+	return s, nil
+}
+
+// CloseStream clears the footprint of a stream when the stream is not needed any more.
+// This must not be executed in reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+	// Set stream status to done.
+	if s.swapState(streamDone) == streamDone {
+		// If it was already done, return.  If multiple closeStream calls
+		// happen simultaneously, wait for the first to finish.
+		<-s.done
+		return
+	}
+	// status and trailers can be updated here without any synchronization because the stream goroutine will
+	// only read it after it sees an io.EOF error from read or write and we'll write those errors
+	// only after updating this.
+	s.status = st
+	if len(mdata) > 0 {
+		s.trailer = mdata
+	}
+	if err != nil {
+		// This will unblock reads eventually.
+		s.write(recvMsg{err: err})
+	}
+	// If headerChan isn't closed, then close it.
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.noHeaders = true
+		close(s.headerChan)
+	}
+	cleanup := &cleanupStream{
+		streamID: s.id,
+		onWrite: func() {
+			t.mu.Lock()
+			if t.activeStreams != nil {
+				delete(t.activeStreams, s.id)
+			}
+			t.mu.Unlock()
+			if channelz.IsOn() {
+				if eosReceived {
+					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+				} else {
+					atomic.AddInt64(&t.czData.streamsFailed, 1)
+				}
+			}
+		},
+		rst:     rst,
+		rstCode: rstCode,
+	}
+	addBackStreamQuota := func(interface{}) bool {
+		t.streamQuota++
+		if t.streamQuota > 0 && t.waitingStreams > 0 {
+			select {
+			case t.streamsQuotaAvailable <- struct{}{}:
+			default:
+			}
+		}
+		return true
+	}
+	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
+	// This will unblock write.
+	close(s.done)
+	if s.doneFunc != nil {
+		s.doneFunc()
+	}
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed any more.
+func (t *http2Client) Close(err error) {
+	t.mu.Lock()
+	// Make sure we only close once.
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if logger.V(logLevel) {
+		logger.Infof("transport: closing: %v", err)
+	}
+	// Call t.onClose ASAP to prevent the client from attempting to create new
+	// streams.
+	if t.state != draining {
+		t.onClose(GoAwayInvalid)
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	if t.kpDormant {
+		// If the keepalive goroutine is blocked on this condition variable, we
+		// should unblock it so that the goroutine eventually exits.
+		t.kpDormancyCond.Signal()
+	}
+	t.mu.Unlock()
+	t.controlBuf.finish()
+	t.cancel()
+	t.conn.Close()
+	channelz.RemoveEntry(t.channelzID)
+	// Append info about previous goaways if there were any, since this may be important
+	// for understanding the root cause for this connection to be closed.
+	_, goAwayDebugMessage := t.GetGoAwayReason()
+
+	var st *status.Status
+	if len(goAwayDebugMessage) > 0 {
+		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+		err = st.Err()
+	} else {
+		st = status.New(codes.Unavailable, err.Error())
+	}
+
+	// Notify all active streams.
+	for _, s := range streams {
+		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
+	}
+	for _, sh := range t.statsHandlers {
+		connEnd := &stats.ConnEnd{
+			Client: true,
+		}
+		sh.HandleConn(t.ctx, connEnd)
+	}
+}
+
+// GracefulClose sets the state to draining, which prevents new streams from
+// being created and causes the transport to be closed when the last active
+// stream is closed.  If there are no active streams, the transport is closed
+// immediately.  This does nothing if the transport is already draining or
+// closing.
+func (t *http2Client) GracefulClose() {
+	t.mu.Lock()
+	// Make sure we move to draining only from active.
+	if t.state == draining || t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if logger.V(logLevel) {
+		logger.Infof("transport: GracefulClose called")
+	}
+	t.onClose(GoAwayInvalid)
+	t.state = draining
+	active := len(t.activeStreams)
+	t.mu.Unlock()
+	if active == 0 {
+		t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
+		return
+	}
+	t.controlBuf.put(&incomingGoAway{})
+}
+
+// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
+// should proceed only if Write returns nil.
+func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if opts.Last {
+		// If it's the last message, update stream state.
+		if !s.compareAndSwapState(streamActive, streamWriteDone) {
+			return errStreamDone
+		}
+	} else if s.getState() != streamActive {
+		return errStreamDone
+	}
+	df := &dataFrame{
+		streamID:  s.id,
+		endStream: opts.Last,
+		h:         hdr,
+		d:         data,
+	}
+	if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
+		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+			return err
+		}
+	}
+	return t.controlBuf.put(df)
+}
+
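+// getStream returns the active stream matching the frame's stream ID, or nil
+// if no such stream exists (e.g. it has already been closed).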
+func (t *http2Client) getStream(f http2.Frame) *Stream {
+	t.mu.Lock()
+	s := t.activeStreams[f.Header().StreamID]
+	t.mu.Unlock()
+	return s
+}
+
+// adjustWindow sends out an extra window update over the initial window size
+// of the stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+	if w := s.fc.maybeAdjust(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateWindow adjusts the inbound quota for the stream.
+// Window updates will be sent out when the cumulative quota
+// exceeds the corresponding threshold.
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
+func (t *http2Client) updateFlowControl(n uint32) {
+	updateIWS := func(interface{}) bool {
+		t.initialWindowSize = int32(n)
+		t.mu.Lock()
+		for _, s := range t.activeStreams {
+			s.fc.newLimit(n)
+		}
+		t.mu.Unlock()
+		return true
+	}
+	t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
+	t.controlBuf.put(&outgoingSettings{
+		ss: []http2.Setting{
+			{
+				ID:  http2.SettingInitialWindowSize,
+				Val: n,
+			},
+		},
+	})
+}
+
+func (t *http2Client) handleData(f *http2.DataFrame) {
+	size := f.Header().Length
+	var sendBDPPing bool
+	if t.bdpEst != nil {
+		sendBDPPing = t.bdpEst.add(size)
+	}
+	// Decouple the connection's flow control from the application's read.
+	// An update on the connection's flow control should not depend on
+	// whether the user application has read the data or not. Such a
+	// restriction is already imposed on the stream's flow control,
+	// and therefore the sender will be blocked anyway.
+	// Decoupling the connection flow control will prevent other
+	// active (fast) streams from starving in the presence of slow or
+	// inactive streams.
+	if w := t.fc.onData(size); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{
+			streamID:  0,
+			increment: w,
+		})
+	}
+	if sendBDPPing {
+		// Avoid excessive ping detection (e.g. in an L7 proxy)
+		// by sending a window update prior to the BDP ping.
+
+		if w := t.fc.reset(); w > 0 {
+			t.controlBuf.put(&outgoingWindowUpdate{
+				streamID:  0,
+				increment: w,
+			})
+		}
+
+		t.controlBuf.put(bdpPing)
+	}
+	// Select the right stream to dispatch.
+	s := t.getStream(f)
+	if s == nil {
+		return
+	}
+	if size > 0 {
+		if err := s.fc.onData(size); err != nil {
+			t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
+			return
+		}
+		if f.Header().Flags.Has(http2.FlagDataPadded) {
+			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+			}
+		}
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of next frame.
+		// Can this copy be eliminated?
+		if len(f.Data()) > 0 {
+			buffer := t.bufferPool.get()
+			buffer.Reset()
+			buffer.Write(f.Data())
+			s.write(recvMsg{buffer: buffer})
+		}
+	}
+	// The server has closed the stream without sending trailers.  Record that
+	// the read direction is closed, and set the status appropriately.
+	if f.StreamEnded() {
+		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
+	}
+}
+
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+	s := t.getStream(f)
+	if s == nil {
+		return
+	}
+	if f.ErrCode == http2.ErrCodeRefusedStream {
+		// The stream was unprocessed by the server.
+		atomic.StoreUint32(&s.unprocessed, 1)
+	}
+	statusCode, ok := http2ErrConvTab[f.ErrCode]
+	if !ok {
+		if logger.V(logLevel) {
+			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
+		}
+		statusCode = codes.Unknown
+	}
+	if statusCode == codes.Canceled {
+		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
+			// Our deadline was already exceeded, and that was likely the cause
+			// of this cancelation.  Alter the status code accordingly.
+			statusCode = codes.DeadlineExceeded
+		}
+	}
+	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
+	if f.IsAck() {
+		return
+	}
+	var maxStreams *uint32
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxConcurrentStreams:
+			maxStreams = new(uint32)
+			*maxStreams = s.Val
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
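+	// Per the HTTP/2 spec, SETTINGS_MAX_CONCURRENT_STREAMS is unlimited when
+	// absent, so a first SETTINGS frame that omits it is modeled as MaxUint32.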
+	if isFirst && maxStreams == nil {
+		maxStreams = new(uint32)
+		*maxStreams = math.MaxUint32
+	}
+	sf := &incomingSettings{
+		ss: ss,
+	}
+	if maxStreams != nil {
+		updateStreamQuota := func() {
+			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+			t.maxConcurrentStreams = *maxStreams
+			t.streamQuota += delta
+			if delta > 0 && t.waitingStreams > 0 {
+				close(t.streamsQuotaAvailable) // wake all of them up.
+				t.streamsQuotaAvailable = make(chan struct{}, 1)
+			}
+		}
+		updateFuncs = append(updateFuncs, updateStreamQuota)
+	}
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, sf)
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
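+	// Not an ACK: echo the 8-byte ping payload back with the ACK flag set,
+	// as RFC 7540 §6.7 requires.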
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+		if logger.V(logLevel) {
+			logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+		}
+	}
+	id := f.LastStreamID
+	if id > 0 && id%2 == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
+		return
+	}
+	// A client can receive multiple GoAways from the server (see
+	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
+	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+	// sent after an RTT delay with the ID of the last stream the server will
+	// process.
+	//
+	// Therefore, when we get the first GoAway we don't necessarily close any
+	// streams. In the case of a second GoAway, we close all streams created
+	// after the GoAway ID. This way, streams that were in-flight while the
+	// GoAway from the server was being sent don't get killed.
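+	//
+	// For example, if the first GoAway carried LastStreamID 2^31-1 and the
+	// second carries 7, only streams with IDs 9, 11, ... are marked
+	// unprocessed and closed; they are safe to retry on a new connection.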
+	select {
+	case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways).
+		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
+		if id > t.prevGoAwayID {
+			t.mu.Unlock()
+			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+			return
+		}
+	default:
+		t.setGoAwayReason(f)
+		close(t.goAway)
+		defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
+		// Notify the clientconn about the GOAWAY before we set the state to
+		// draining, to allow the client to stop attempting to create streams
+		// before disallowing new streams on this connection.
+		if t.state != draining {
+			t.onClose(t.goAwayReason)
+			t.state = draining
+		}
+	}
+	// All streams with IDs greater than the GoAwayId
+	// and less than or equal to the previous GoAway ID should be killed.
+	upperLimit := t.prevGoAwayID
+	if upperLimit == 0 { // This is the first GoAway Frame.
+		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+	}
+
+	t.prevGoAwayID = id
+	if len(t.activeStreams) == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+		return
+	}
+
+	streamsToClose := make([]*Stream, 0)
+	for streamID, stream := range t.activeStreams {
+		if streamID > id && streamID <= upperLimit {
+			// The stream was unprocessed by the server.
+			atomic.StoreUint32(&stream.unprocessed, 1)
+			streamsToClose = append(streamsToClose, stream)
+		}
+	}
+	t.mu.Unlock()
+	// Called outside t.mu because closeStream can take controlBuf's mu, which
+	// could induce deadlock and is not allowed.
+	for _, stream := range streamsToClose {
+		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+	}
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on the transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+	t.goAwayReason = GoAwayNoReason
+	switch f.ErrCode {
+	case http2.ErrCodeEnhanceYourCalm:
+		if string(f.DebugData()) == "too_many_pings" {
+			t.goAwayReason = GoAwayTooManyPings
+		}
+	}
+	if len(f.DebugData()) == 0 {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+	} else {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+	}
+}
+
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.goAwayReason, t.goAwayDebugMessage
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+	s := t.getStream(frame)
+	if s == nil {
+		return
+	}
+	endStream := frame.StreamEnded()
+	atomic.StoreUint32(&s.bytesReceived, 1)
+	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
+
+	if !initialHeader && !endStream {
+		// As specified by gRPC over HTTP2, a HEADERS frame (and associated
+		// CONTINUATION frames) can only appear at the start or end of a
+		// stream. Therefore, a second HEADERS frame must have the EOS bit set.
+		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+		return
+	}
+
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		se := status.New(codes.Internal, "peer header list size exceeded limit")
+		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+		return
+	}
+
+	var (
+		// If a gRPC Response-Headers has already been received, then it means
+		// that the peer is speaking gRPC and we are in gRPC mode.
+		isGRPC         = !initialHeader
+		mdata          = make(map[string][]string)
+		contentTypeErr = "malformed header: missing HTTP content-type"
+		grpcMessage    string
+		statusGen      *status.Status
+		recvCompress   string
+		httpStatusCode *int
+		httpStatusErr  string
+		rawStatusCode  = codes.Unknown
+		// headerError is set if an error is encountered while parsing the headers
+		headerError string
+	)
+
+	if initialHeader {
+		httpStatusErr = "malformed header: missing HTTP status"
+	}
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+				break
+			}
+			contentTypeErr = ""
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			isGRPC = true
+		case "grpc-encoding":
+			recvCompress = hf.Value
+		case "grpc-status":
+			code, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			rawStatusCode = codes.Code(uint32(code))
+		case "grpc-message":
+			grpcMessage = decodeGrpcMessage(hf.Value)
+		case "grpc-status-details-bin":
+			var err error
+			statusGen, err = decodeGRPCStatusDetails(hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
+			}
+		case ":status":
+			if hf.Value == "200" {
+				httpStatusErr = ""
+				statusCode := 200
+				httpStatusCode = &statusCode
+				break
+			}
+
+			c, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			statusCode := int(c)
+			httpStatusCode = &statusCode
+
+			httpStatusErr = fmt.Sprintf(
+				"unexpected HTTP status code received from server: %d (%s)",
+				statusCode,
+				http.StatusText(statusCode),
+			)
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	if !isGRPC || httpStatusErr != "" {
+		var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+		if httpStatusCode != nil {
+			var ok bool
+			code, ok = HTTPStatusConvTab[*httpStatusCode]
+			if !ok {
+				code = codes.Unknown
+			}
+		}
+		var errs []string
+		if httpStatusErr != "" {
+			errs = append(errs, httpStatusErr)
+		}
+		if contentTypeErr != "" {
+			errs = append(errs, contentTypeErr)
+		}
+		// Not a valid gRPC response; fail the stream with a status derived
+		// from the HTTP-level errors.
+		se := status.New(code, strings.Join(errs, "; "))
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	if headerError != "" {
+		se := status.New(codes.Internal, headerError)
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	isHeader := false
+
+	// If headerChan hasn't been closed yet
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.headerValid = true
+		if !endStream {
+			// HEADERS frame block carries a Response-Headers.
+			isHeader = true
+			// These values can be set without any synchronization because
+			// stream goroutine will read it only after seeing a closed
+			// headerChan which we'll close after setting this.
+			s.recvCompress = recvCompress
+			if len(mdata) > 0 {
+				s.header = mdata
+			}
+		} else {
+			// HEADERS frame block carries a Trailers-Only.
+			s.noHeaders = true
+		}
+		close(s.headerChan)
+	}
+
+	for _, sh := range t.statsHandlers {
+		if isHeader {
+			inHeader := &stats.InHeader{
+				Client:      true,
+				WireLength:  int(frame.Header().Length),
+				Header:      metadata.MD(mdata).Copy(),
+				Compression: s.recvCompress,
+			}
+			sh.HandleRPC(s.ctx, inHeader)
+		} else {
+			inTrailer := &stats.InTrailer{
+				Client:     true,
+				WireLength: int(frame.Header().Length),
+				Trailer:    metadata.MD(mdata).Copy(),
+			}
+			sh.HandleRPC(s.ctx, inTrailer)
+		}
+	}
+
+	if !endStream {
+		return
+	}
+
+	if statusGen == nil {
+		statusGen = status.New(rawStatusCode, grpcMessage)
+	}
+
+	// If the client received END_STREAM from the server while the stream was still active, send RST_STREAM.
+	rst := s.getState() == streamActive
+	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
+}
+
+// readServerPreface reads and handles the initial settings frame from the
+// server.
+func (t *http2Client) readServerPreface() error {
+	frame, err := t.framer.fr.ReadFrame()
+	if err != nil {
+		return connectionErrorf(true, err, "error reading server preface: %v", err)
+	}
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
+	}
+	t.handleSettings(sf, true)
+	return nil
+}
+
+// reader verifies the server preface and reads all subsequent data from
+// network connection.  If the server preface is not read successfully, an
+// error is pushed to errCh; otherwise errCh is closed with no error.
+func (t *http2Client) reader(errCh chan<- error) {
+	defer close(t.readerDone)
+
+	if err := t.readServerPreface(); err != nil {
+		errCh <- err
+		return
+	}
+	close(errCh)
+	if t.keepaliveEnabled {
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+	}
+
+	// loop to keep reading incoming messages on this transport.
+	for {
+		t.controlBuf.throttle()
+		frame, err := t.framer.fr.ReadFrame()
+		if t.keepaliveEnabled {
+			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+		}
+		if err != nil {
+			// Abort an active stream if the http2.Framer returns a
+			// http2.StreamError. This can happen only if the server's response
+			// is malformed http2.
+			if se, ok := err.(http2.StreamError); ok {
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					// Use the error detail to provide a better error message.
+					code := http2ErrConvTab[se.Code]
+					errorDetail := t.framer.fr.ErrorDetail()
+					var msg string
+					if errorDetail != nil {
+						msg = errorDetail.Error()
+					} else {
+						msg = "received invalid frame"
+					}
+					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
+				}
+				continue
+			} else {
+				// Transport error.
+				t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+				return
+			}
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			t.operateHeaders(frame)
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame, false)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.GoAwayFrame:
+			t.handleGoAway(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		default:
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			}
+		}
+	}
+}
+
+func minTime(a, b time.Duration) time.Duration {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+	p := &ping{data: [8]byte{}}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining within which we should receive an ACK for
+	// the last ping sent.
+	timeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	timer := time.NewTimer(t.kp.Time)
+	for {
+		select {
+		case <-timer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were here.
+				outstandingPing = false
+				// The next timer should fire kp.Time after the lastRead time.
+				timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
+				continue
+			}
+			if outstandingPing && timeoutLeft <= 0 {
+				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+				return
+			}
+			t.mu.Lock()
+			if t.state == closing {
+				// If the transport is closing, we should exit from the
+				// keepalive goroutine here. If not, we could have a race
+				// between the call to Signal() from Close() and the call to
+				// Wait() here, whereby the keepalive goroutine ends up
+				// blocking on the condition variable which will never be
+				// signalled again.
+				t.mu.Unlock()
+				return
+			}
+			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+				// If a ping was sent out previously (because there were active
+				// streams at that point) which wasn't acked and its timeout
+				// hadn't fired, but we got here and are about to go dormant,
+				// we should make sure that we unconditionally send a ping once
+				// we awaken.
+				outstandingPing = false
+				t.kpDormant = true
+				t.kpDormancyCond.Wait()
+			}
+			t.kpDormant = false
+			t.mu.Unlock()
+
+			// We get here either because we were dormant and a new stream was
+			// created which unblocked the Wait() call, or because the
+			// keepalive timer expired. In both cases, we need to send a ping.
+			if !outstandingPing {
+				if channelz.IsOn() {
+					atomic.AddInt64(&t.czData.kpCount, 1)
+				}
+				t.controlBuf.put(p)
+				timeoutLeft = t.kp.Timeout
+				outstandingPing = true
+			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, timeoutLeft)
+			timeoutLeft -= sleepDuration
+			timer.Reset(sleepDuration)
+		case <-t.ctx.Done():
+			if !timer.Stop() {
+				<-timer.C
+			}
+			return
+		}
+	}
+}
+
+func (t *http2Client) Error() <-chan struct{} {
+	return t.ctx.Done()
+}
+
+func (t *http2Client) GoAway() <-chan struct{} {
+	return t.goAway
+}
+
+func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
+	s := channelz.SocketInternalMetric{
+		StreamsStarted:                  atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                   atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                    atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                  atomic.LoadInt64(&t.czData.kpCount),
+		LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:        time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:    time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
+		LocalFlowControlWindow:          int64(t.fc.getSize()),
+		SocketOptions:                   channelz.GetSocketOption(t.conn),
+		LocalAddr:                       t.localAddr,
+		RemoteAddr:                      t.remoteAddr,
+		// RemoteName :
+	}
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
+	s.RemoteFlowControlWindow = t.getOutFlowWindow()
+	return &s
+}
+
+func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
+
+func (t *http2Client) IncrMsgSent() {
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+}
+
+func (t *http2Client) IncrMsgRecv() {
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+}
+
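+// getOutFlowWindow requests the current outbound flow control window size
+// through the control buffer. It returns -1 if the transport is closed and
+// -2 if no response arrives within one second.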
+func (t *http2Client) getOutFlowWindow() int64 {
+	resp := make(chan uint32, 1)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.ctxDone:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+func (t *http2Client) stateForTesting() transportState {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.state
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
new file mode 100644
index 000000000..bc3da7067
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -0,0 +1,1461 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/internal/syscall"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+var (
+	// ErrIllegalHeaderWrite indicates that setting a header is illegal
+	// because of the stream's state.
+	ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
+	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
+	// than the limit set by peer.
+	ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
+)
+
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
+// http2Server implements the ServerTransport interface with HTTP2.
+type http2Server struct {
+	lastRead    int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx         context.Context
+	done        chan struct{}
+	conn        net.Conn
+	loopy       *loopyWriter
+	readerDone  chan struct{} // sync point to enable testing.
+	writerDone  chan struct{} // sync point to enable testing.
+	remoteAddr  net.Addr
+	localAddr   net.Addr
+	authInfo    credentials.AuthInfo // auth info about the connection
+	inTapHandle tap.ServerInHandle
+	framer      *framer
+	// The max number of concurrent streams.
+	maxStreams uint32
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	stats      []stats.Handler
+	// Keepalive and max-age parameters for the server.
+	kp keepalive.ServerParameters
+	// Keepalive enforcement policy.
+	kep keepalive.EnforcementPolicy
+	// The time instant at which the last ping was received.
+	lastPingAt time.Time
+	// Number of times the client has violated keepalive ping policy so far.
+	pingStrikes uint8
+	// Flag to signify that number of ping strikes should be reset to 0.
+	// This is set whenever data or header frames are sent.
+	// 1 means yes.
+	resetPingStrikes      uint32 // Accessed atomically.
+	initialWindowSize     int32
+	bdpEst                *bdpEstimator
+	maxSendHeaderListSize *uint32
+
+	mu sync.Mutex // guard the following
+
+	// drainEvent is initialized when Drain() is called the first time, after
+	// which the server writes out the first GoAway (with ID 2^31-1) frame.
+	// Then an independent goroutine will be launched to later send the second
+	// GoAway. During this time we don't want to write another first GoAway
+	// (with ID 2^31-1) frame. Thus a call to Drain() will be a no-op if
+	// drainEvent is already initialized, since draining is already underway.
+	drainEvent    *grpcsync.Event
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// idle is the time instant when the connection went idle.
+	// This is either the beginning of the connection or when the number of
+	// RPCs goes down to 0.
+	// When the connection is busy, this value is set to 0.
+	idle time.Time
+
+	// Fields below are for channelz metric collection.
+	channelzID *channelz.Identifier
+	czData     *channelzData
+	bufferPool *bufferPool
+
+	connectionID uint64
+
+	// maxStreamMu guards the maximum stream ID
+	// This lock may not be taken if mu is already held.
+	maxStreamMu sync.Mutex
+	maxStreamID uint32 // max stream ID ever seen
+}
+
+// NewServerTransport creates an HTTP/2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+	var authInfo credentials.AuthInfo
+	rawConn := conn
+	if config.Credentials != nil {
+		var err error
+		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+		if err != nil {
+			// ErrConnDispatched means that the connection was dispatched away
+			// from gRPC; those connections should be left open. io.EOF means
+			// the connection was closed before handshaking completed, which can
+			// happen naturally from probers. Return these errors directly.
+			if err == credentials.ErrConnDispatched || err == io.EOF {
+				return nil, err
+			}
+			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		}
+	}
+	writeBufSize := config.WriteBufferSize
+	readBufSize := config.ReadBufferSize
+	maxHeaderListSize := defaultServerMaxHeaderListSize
+	if config.MaxHeaderListSize != nil {
+		maxHeaderListSize = *config.MaxHeaderListSize
+	}
+	framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
+	// Send initial settings as connection preface to client.
+	isettings := []http2.Setting{{
+		ID:  http2.SettingMaxFrameSize,
+		Val: http2MaxFrameLen,
+	}}
+	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
+	// permitted in the HTTP2 spec.
+	maxStreams := config.MaxStreams
+	if maxStreams == 0 {
+		maxStreams = math.MaxUint32
+	} else {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingMaxConcurrentStreams,
+			Val: maxStreams,
+		})
+	}
+	dynamicWindow := true
+	iwz := int32(initialWindowSize)
+	if config.InitialWindowSize >= defaultWindowSize {
+		iwz = config.InitialWindowSize
+		dynamicWindow = false
+	}
+	icwz := int32(initialWindowSize)
+	if config.InitialConnWindowSize >= defaultWindowSize {
+		icwz = config.InitialConnWindowSize
+		dynamicWindow = false
+	}
+	if iwz != defaultWindowSize {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingInitialWindowSize,
+			Val: uint32(iwz)})
+	}
+	if config.MaxHeaderListSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingMaxHeaderListSize,
+			Val: *config.MaxHeaderListSize,
+		})
+	}
+	if config.HeaderTableSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingHeaderTableSize,
+			Val: *config.HeaderTableSize,
+		})
+	}
+	if err := framer.fr.WriteSettings(isettings...); err != nil {
+		return nil, connectionErrorf(false, err, "transport: %v", err)
+	}
+	// Adjust the connection flow control window if needed.
+	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+		if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
+			return nil, connectionErrorf(false, err, "transport: %v", err)
+		}
+	}
+	kp := config.KeepaliveParams
+	if kp.MaxConnectionIdle == 0 {
+		kp.MaxConnectionIdle = defaultMaxConnectionIdle
+	}
+	if kp.MaxConnectionAge == 0 {
+		kp.MaxConnectionAge = defaultMaxConnectionAge
+	}
+	// Add a jitter to MaxConnectionAge.
+	kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
+	if kp.MaxConnectionAgeGrace == 0 {
+		kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
+	}
+	if kp.Time == 0 {
+		kp.Time = defaultServerKeepaliveTime
+	}
+	if kp.Timeout == 0 {
+		kp.Timeout = defaultServerKeepaliveTimeout
+	}
+	if kp.Time != infinity {
+		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+		}
+	}
+	kep := config.KeepalivePolicy
+	if kep.MinTime == 0 {
+		kep.MinTime = defaultKeepalivePolicyMinTime
+	}
+
+	done := make(chan struct{})
+	t := &http2Server{
+		ctx:               setConnection(context.Background(), rawConn),
+		done:              done,
+		conn:              conn,
+		remoteAddr:        conn.RemoteAddr(),
+		localAddr:         conn.LocalAddr(),
+		authInfo:          authInfo,
+		framer:            framer,
+		readerDone:        make(chan struct{}),
+		writerDone:        make(chan struct{}),
+		maxStreams:        maxStreams,
+		inTapHandle:       config.InTapHandle,
+		fc:                &trInFlow{limit: uint32(icwz)},
+		state:             reachable,
+		activeStreams:     make(map[uint32]*Stream),
+		stats:             config.StatsHandlers,
+		kp:                kp,
+		idle:              time.Now(),
+		kep:               kep,
+		initialWindowSize: iwz,
+		czData:            new(channelzData),
+		bufferPool:        newBufferPool(),
+	}
+	// Add peer information to the http2server context.
+	t.ctx = peer.NewContext(t.ctx, t.getPeer())
+
+	t.controlBuf = newControlBuffer(t.done)
+	if dynamicWindow {
+		t.bdpEst = &bdpEstimator{
+			bdp:               initialWindowSize,
+			updateFlowControl: t.updateFlowControl,
+		}
+	}
+	for _, sh := range t.stats {
+		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+			RemoteAddr: t.remoteAddr,
+			LocalAddr:  t.localAddr,
+		})
+		connBegin := &stats.ConnBegin{}
+		sh.HandleConn(t.ctx, connBegin)
+	}
+	t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
+	if err != nil {
+		return nil, err
+	}
+
+	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
+	t.framer.writer.Flush()
+
+	defer func() {
+		if err != nil {
+			t.Close(err)
+		}
+	}()
+
+	// Check the validity of client preface.
+	preface := make([]byte, len(clientPreface))
+	if _, err := io.ReadFull(t.conn, preface); err != nil {
+		// In deployments where a gRPC server runs behind a cloud load balancer
+		// which performs regular TCP level health checks, the connection is
+		// closed immediately by the latter.  Returning io.EOF here allows the
+		// grpc server implementation to recognize this scenario and suppress
+		// logging to reduce spam.
+		if err == io.EOF {
+			return nil, io.EOF
+		}
+		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
+	}
+	if !bytes.Equal(preface, clientPreface) {
+		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
+	}
+
+	frame, err := t.framer.fr.ReadFrame()
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return nil, err
+	}
+	if err != nil {
+		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
+	}
+	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
+	}
+	t.handleSettings(sf)
+
+	go func() {
+		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
+		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
+		err := t.loopy.run()
+		if logger.V(logLevel) {
+			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
+		}
+		t.conn.Close()
+		t.controlBuf.finish()
+		close(t.writerDone)
+	}()
+	go t.keepalive()
+	return t, nil
+}
+
+// operateHeaders takes action on the decoded headers. It returns an error if
+// a fatal error is encountered and the transport needs to close; otherwise it
+// returns nil.
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
+	// Acquire max stream ID lock for entire duration
+	t.maxStreamMu.Lock()
+	defer t.maxStreamMu.Unlock()
+
+	streamID := frame.Header().StreamID
+
+	// frame.Truncated is set to true when the framer detects that the current
+	// header list size hits the MaxHeaderListSize limit.
+	if frame.Truncated {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeFrameSize,
+			onWrite:  func() {},
+		})
+		return nil
+	}
+
+	if streamID%2 != 1 || streamID <= t.maxStreamID {
+		// illegal gRPC stream id.
+		return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
+	}
+	t.maxStreamID = streamID
+
+	buf := newRecvBuffer()
+	s := &Stream{
+		id:  streamID,
+		st:  t,
+		buf: buf,
+		fc:  &inFlow{limit: uint32(t.initialWindowSize)},
+	}
+	var (
+		// if false, content-type was missing or invalid
+		isGRPC      = false
+		contentType = ""
+		mdata       = make(map[string][]string)
+		httpMethod  string
+		// these are set if an error is encountered while parsing the headers
+		protocolError bool
+		headerError   *status.Status
+
+		timeoutSet bool
+		timeout    time.Duration
+	)
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+			if !validContentType {
+				contentType = hf.Value
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			s.contentSubtype = contentSubtype
+			isGRPC = true
+		case "grpc-encoding":
+			s.recvCompress = hf.Value
+		case ":method":
+			httpMethod = hf.Value
+		case ":path":
+			s.method = hf.Value
+		case "grpc-timeout":
+			timeoutSet = true
+			var err error
+			if timeout, err = decodeTimeout(hf.Value); err != nil {
+				headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
+			}
+		// "Transports must consider requests containing the Connection header
+		// as malformed." - A41
+		case "connection":
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
+			}
+			protocolError = true
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	// "If multiple Host headers or multiple :authority headers are present, the
+	// request must be rejected with an HTTP status code 400 as required by Host
+	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is an HTTP/2
+	// error, it takes precedence over a client not speaking gRPC.
+	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+		if logger.V(logLevel) {
+			logger.Errorf("transport: %v", errMsg)
+		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusBadRequest,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
+	if protocolError {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeProtocol,
+			onWrite:  func() {},
+		})
+		return nil
+	}
+	if !isGRPC {
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusUnsupportedMediaType,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+	if headerError != nil {
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusBadRequest,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         headerError,
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
+	// "If :authority is missing, Host must be renamed to :authority." - A41
+	if len(mdata[":authority"]) == 0 {
+		// No-op if host isn't present either; a request with no eventual
+		// :authority header is still a valid RPC.
+		if host, ok := mdata["host"]; ok {
+			mdata[":authority"] = host
+			delete(mdata, "host")
+		}
+	} else {
+		// "If :authority is present, Host must be discarded" - A41
+		delete(mdata, "host")
+	}
+
+	if frame.StreamEnded() {
+		// s is just created by the caller. No lock needed.
+		s.state = streamReadDone
+	}
+	if timeoutSet {
+		s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
+	} else {
+		s.ctx, s.cancel = context.WithCancel(t.ctx)
+	}
+
+	// Attach the received metadata to the context.
+	if len(mdata) > 0 {
+		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
+		}
+		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
+		}
+	}
+	t.mu.Lock()
+	if t.state != reachable {
+		t.mu.Unlock()
+		s.cancel()
+		return nil
+	}
+	if uint32(len(t.activeStreams)) >= t.maxStreams {
+		t.mu.Unlock()
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeRefusedStream,
+			onWrite:  func() {},
+		})
+		s.cancel()
+		return nil
+	}
+	if httpMethod != http.MethodPost {
+		t.mu.Unlock()
+		errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
+		if logger.V(logLevel) {
+			logger.Infof("transport: %v", errMsg)
+		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     405,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
+		s.cancel()
+		return nil
+	}
+	if t.inTapHandle != nil {
+		var err error
+		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+			t.mu.Unlock()
+			if logger.V(logLevel) {
+				logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
+			}
+			stat, ok := status.FromError(err)
+			if !ok {
+				stat = status.New(codes.PermissionDenied, err.Error())
+			}
+			t.controlBuf.put(&earlyAbortStream{
+				httpStatus:     200,
+				streamID:       s.id,
+				contentSubtype: s.contentSubtype,
+				status:         stat,
+				rst:            !frame.StreamEnded(),
+			})
+			return nil
+		}
+	}
+	t.activeStreams[streamID] = s
+	if len(t.activeStreams) == 1 {
+		t.idle = time.Time{}
+	}
+	t.mu.Unlock()
+	if channelz.IsOn() {
+		atomic.AddInt64(&t.czData.streamsStarted, 1)
+		atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+	}
+	s.requestRead = func(n int) {
+		t.adjustWindow(s, uint32(n))
+	}
+	s.ctx = traceCtx(s.ctx, s.method)
+	for _, sh := range t.stats {
+		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+		inHeader := &stats.InHeader{
+			FullMethod:  s.method,
+			RemoteAddr:  t.remoteAddr,
+			LocalAddr:   t.localAddr,
+			Compression: s.recvCompress,
+			WireLength:  int(frame.Header().Length),
+			Header:      metadata.MD(mdata).Copy(),
+		}
+		sh.HandleRPC(s.ctx, inHeader)
+	}
+	s.ctxDone = s.ctx.Done()
+	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
+	s.trReader = &transportReader{
+		reader: &recvBufferReader{
+			ctx:        s.ctx,
+			ctxDone:    s.ctxDone,
+			recv:       s.buf,
+			freeBuffer: t.bufferPool.put,
+		},
+		windowHandler: func(n int) {
+			t.updateWindow(s, uint32(n))
+		},
+	}
+	// Register the stream with loopy.
+	t.controlBuf.put(&registerStream{
+		streamID: s.id,
+		wq:       s.wq,
+	})
+	handle(s)
+	return nil
+}
+
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
+// traceCtx attaches trace to ctx and returns the new context.
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
+	defer close(t.readerDone)
+	for {
+		t.controlBuf.throttle()
+		frame, err := t.framer.fr.ReadFrame()
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+		if err != nil {
+			if se, ok := err.(http2.StreamError); ok {
+				if logger.V(logLevel) {
+					logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+				}
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					t.closeStream(s, true, se.Code, false)
+				} else {
+					t.controlBuf.put(&cleanupStream{
+						streamID: se.StreamID,
+						rst:      true,
+						rstCode:  se.Code,
+						onWrite:  func() {},
+					})
+				}
+				continue
+			}
+			// A clean EOF, an unexpected EOF, and any other read error all
+			// close the transport.
+			t.Close(err)
+			return
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
+				t.Close(err)
+				break
+			}
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		case *http2.GoAwayFrame:
+			// TODO: Handle GoAway from the client appropriately.
+		default:
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+			}
+		}
+	}
+}
+
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.activeStreams == nil {
+		// The transport is closing.
+		return nil, false
+	}
+	s, ok := t.activeStreams[f.Header().StreamID]
+	if !ok {
+		// The stream is already done.
+		return nil, false
+	}
+	return s, true
+}
+
+// adjustWindow sends out an extra window update over the initial window size
+// of the stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+	if w := s.fc.maybeAdjust(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates are delivered to the controller for sending when the
+// cumulative quota exceeds the corresponding threshold.
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
+			increment: w,
+		})
+	}
+}
+
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
+func (t *http2Server) updateFlowControl(n uint32) {
+	t.mu.Lock()
+	for _, s := range t.activeStreams {
+		s.fc.newLimit(n)
+	}
+	t.initialWindowSize = int32(n)
+	t.mu.Unlock()
+	t.controlBuf.put(&outgoingWindowUpdate{
+		streamID:  0,
+		increment: t.fc.newLimit(n),
+	})
+	t.controlBuf.put(&outgoingSettings{
+		ss: []http2.Setting{
+			{
+				ID:  http2.SettingInitialWindowSize,
+				Val: n,
+			},
+		},
+	})
+}
+
+func (t *http2Server) handleData(f *http2.DataFrame) {
+	size := f.Header().Length
+	var sendBDPPing bool
+	if t.bdpEst != nil {
+		sendBDPPing = t.bdpEst.add(size)
+	}
+	// Decouple connection's flow control from application's read.
+	// An update on connection's flow control should not depend on
+	// whether user application has read the data or not. Such a
+	// restriction is already imposed on the stream's flow control,
+	// and therefore the sender will be blocked anyways.
+	// Decoupling the connection flow control will prevent other
+	// active(fast) streams from starving in presence of slow or
+	// inactive streams.
+	if w := t.fc.onData(size); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{
+			streamID:  0,
+			increment: w,
+		})
+	}
+	if sendBDPPing {
+		// Avoid excessive ping detection (e.g. in an L7 proxy)
+		// by sending a window update prior to the BDP ping.
+		if w := t.fc.reset(); w > 0 {
+			t.controlBuf.put(&outgoingWindowUpdate{
+				streamID:  0,
+				increment: w,
+			})
+		}
+		t.controlBuf.put(bdpPing)
+	}
+	// Select the right stream to dispatch.
+	s, ok := t.getStream(f)
+	if !ok {
+		return
+	}
+	if s.getState() == streamReadDone {
+		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+		return
+	}
+	if size > 0 {
+		if err := s.fc.onData(size); err != nil {
+			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
+			return
+		}
+		if f.Header().Flags.Has(http2.FlagDataPadded) {
+			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+			}
+		}
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of next frame.
+		// Can this copy be eliminated?
+		if len(f.Data()) > 0 {
+			buffer := t.bufferPool.get()
+			buffer.Reset()
+			buffer.Write(f.Data())
+			s.write(recvMsg{buffer: buffer})
+		}
+	}
+	if f.StreamEnded() {
+		// Received the end of stream from the client.
+		s.compareAndSwapState(streamActive, streamReadDone)
+		s.write(recvMsg{err: io.EOF})
+	}
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+	// If the stream is not deleted from the transport's active streams map,
+	// then do a regular close stream.
+	if s, ok := t.getStream(f); ok {
+		t.closeStream(s, false, 0, false)
+		return
+	}
+	// If the stream is already deleted from the active streams map, then put a
+	// cleanupStream item into the controlbuf to delete the stream from the
+	// loopy writer's established streams map.
+	t.controlBuf.put(&cleanupStream{
+		streamID: f.Header().StreamID,
+		rst:      false,
+		rstCode:  0,
+		onWrite:  func() {},
+	})
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+	if f.IsAck() {
+		return
+	}
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
+	t.controlBuf.executeAndPut(func(interface{}) bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, &incomingSettings{
+		ss: ss,
+	})
+}
+
+const (
+	maxPingStrikes     = 2
+	defaultPingTimeout = 2 * time.Hour
+)
+
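+// handlePing acks client pings and enforces the keepalive policy: a ping that
+// arrives too soon after the previous one, without intervening data or header
+// writes, earns a "ping strike". After more than maxPingStrikes strikes the
+// server sends a GOAWAY with ENHANCE_YOUR_CALM and closes the connection.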
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		if f.Data == goAwayPing.data && t.drainEvent != nil {
+			t.drainEvent.Fire()
+			return
+		}
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+
+	now := time.Now()
+	defer func() {
+		t.lastPingAt = now
+	}()
+	// If resetPingStrikes is set, we don't need to check for a policy
+	// violation for this ping, and the pingStrikes counter should be reset
+	// to 0.
+	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+		t.pingStrikes = 0
+		return
+	}
+	t.mu.Lock()
+	ns := len(t.activeStreams)
+	t.mu.Unlock()
+	if ns < 1 && !t.kep.PermitWithoutStream {
+		// Keepalive shouldn't be active; thus, this new ping should
+		// have come after at least defaultPingTimeout.
+		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+			t.pingStrikes++
+		}
+	} else {
+		// Check if keepalive policy is respected.
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+			t.pingStrikes++
+		}
+	}
+
+	if t.pingStrikes > maxPingStrikes {
+		// Send goaway and close the connection.
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
+	}
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some
+			// non-restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
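+// checkForHeaderListSize reports whether the given header frame fits within
+// the maximum header list size advertised by the client via SETTINGS; if the
+// client never advertised a limit, every frame is allowed.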
+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			if logger.V(logLevel) {
+				logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
+			return false
+		}
+	}
+	return true
+}
+
+func (t *http2Server) streamContextErr(s *Stream) error {
+	select {
+	case <-t.done:
+		return ErrConnClosing
+	default:
+	}
+	return ContextErr(s.ctx.Err())
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+	if s.getState() == streamDone {
+		return t.streamContextErr(s)
+	}
+
+	if s.updateHeaderSent() {
+		return ErrIllegalHeaderWrite
+	}
+
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		return status.Convert(err).Err()
+	}
+	return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the
+	// metadata and other header fields first and create a slice of that exact
+	// size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite:   t.setResetPingStrikes,
+	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	for _, sh := range t.stats {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		sh.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+
+	if s.getState() == streamDone {
+		return nil
+	}
+
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the
+	// metadata and other header fields first and create a slice of that exact
+	// size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() {                      // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				return err
+			}
+		} else { // Send a trailer only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite:   t.setResetPingStrikes,
+	}
+
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	for _, sh := range t.stats {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		sh.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A
+// non-nil error is returned if it fails (e.g., framing error, transport
+// error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			return err
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			return t.streamContextErr(s)
+		}
+	}
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		return t.streamContextErr(s)
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of
+// keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of
+// keepalive.Time, and closes a non-responsive connection after an additional
+// duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining within which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
+	defer func() {
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
+	}()
+
+	for {
+		select {
+		case <-idleTimer.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+				t.mu.Unlock()
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
+				continue
+			}
+			val := t.kp.MaxConnectionIdle - time.Since(idle)
+			t.mu.Unlock()
+			if val <= 0 {
+				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+				// Gracefully close the connection.
+				t.Drain()
+				return
+			}
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
+			t.Drain()
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
+			select {
+			case <-ageTimer.C:
+				// Close the connection after grace period.
+				if logger.V(logLevel) {
+					logger.Infof("transport: closing server transport due to maximum connection age.")
+				}
+				t.controlBuf.put(closeConnection{})
+			case <-t.done:
+			}
+			return
+		case <-kpTimer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were
+				// here. Set up the timer to fire kp.Time after the lastRead
+				// time and continue.
+				outstandingPing = false
+				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
+				continue
+			}
+			if outstandingPing && kpTimeoutLeft <= 0 {
+				t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
+				return
+			}
+			if !outstandingPing {
+				if channelz.IsOn() {
+					atomic.AddInt64(&t.czData.kpCount, 1)
+				}
+				t.controlBuf.put(p)
+				kpTimeoutLeft = t.kp.Timeout
+				outstandingPing = true
+			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+			kpTimeoutLeft -= sleepDuration
+			kpTimer.Reset(sleepDuration)
+		case <-t.done:
+			return
+		}
+	}
+}
+
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams.
+// This could cause resource issues. Revisit this later.
+func (t *http2Server) Close(err error) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if logger.V(logLevel) {
+		logger.Infof("transport: closing: %v", err)
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	t.mu.Unlock()
+	t.controlBuf.finish()
+	close(t.done)
+	if err := t.conn.Close(); err != nil && logger.V(logLevel) {
+		logger.Infof("transport: error closing conn during Close: %v", err)
+	}
+	channelz.RemoveEntry(t.channelzID)
+	// Cancel all active streams.
+	for _, s := range streams {
+		s.cancel()
+	}
+	for _, sh := range t.stats {
+		connEnd := &stats.ConnEnd{}
+		sh.HandleConn(t.ctx, connEnd)
+	}
+}
+
+// deleteStream deletes the stream s from transport's active streams.
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
+	t.mu.Lock()
+	if _, ok := t.activeStreams[s.id]; ok {
+		delete(t.activeStreams, s.id)
+		if len(t.activeStreams) == 0 {
+			t.idle = time.Now()
+		}
+	}
+	t.mu.Unlock()
+
+	if channelz.IsOn() {
+		if eosReceived {
+			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+		} else {
+			atomic.AddInt64(&t.czData.streamsFailed, 1)
+		}
+	}
+}
+
+// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	oldState := s.swapState(streamDone)
+	if oldState == streamDone {
+		// If the stream was already done, return.
+		return
+	}
+
+	hdr.cleanup = &cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite: func() {
+			t.deleteStream(s, eosReceived)
+		},
+	}
+	t.controlBuf.put(hdr)
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed any more.
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	s.swapState(streamDone)
+	t.deleteStream(s, eosReceived)
+
+	t.controlBuf.put(&cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite:  func() {},
+	})
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+	return t.remoteAddr
+}
+
+func (t *http2Server) Drain() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.drainEvent != nil {
+		return
+	}
+	t.drainEvent = grpcsync.NewEvent()
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
+}
+
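+// goAwayPing is the ping sent after the first, "heads-up" GoAway; its fixed
+// payload lets handlePing recognize the ack and fire drainEvent so the second
+// GoAway can be sent.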
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// outgoingGoAwayHandler handles an outgoing GoAway and returns true if loopy
+// needs to put itself in draining mode.
+func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+	t.maxStreamMu.Lock()
+	t.mu.Lock()
+	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
+		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
+		// The transport is closing.
+		return false, ErrConnClosing
+	}
+	if !g.headsUp {
+		// Stop accepting more streams now.
+		t.state = draining
+		sid := t.maxStreamID
+		retErr := g.closeConn
+		if len(t.activeStreams) == 0 {
+			retErr = errors.New("second GOAWAY written and no active streams left to process")
+		}
+		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
+		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
+			return false, err
+		}
+		if retErr != nil {
+			// Abruptly close the connection following the GoAway (via
+			// loopywriter).  But flush out what's inside the buffer first.
+			t.framer.writer.Flush()
+			return false, retErr
+		}
+		return true, nil
+	}
+	t.mu.Unlock()
+	t.maxStreamMu.Unlock()
+	// For a graceful close, send out a GoAway with a stream ID of MaxUint32,
+	// then follow that with a ping and wait for the ack to come back or for a
+	// timer to expire. During this time, accept new streams since they might
+	// have originated before the GoAway reached the client. After getting the
+	// ack or on timer expiration, send out another GoAway, this time with an
+	// ID of the max stream the server intends to process.
+	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+		return false, err
+	}
+	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
+		return false, err
+	}
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		defer timer.Stop()
+		select {
+		case <-t.drainEvent.Done():
+		case <-timer.C:
+		case <-t.done:
+			return
+		}
+		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
+	}()
+	return false, nil
+}
+
+func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
+	s := channelz.SocketInternalMetric{
+		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
+		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
+		LocalFlowControlWindow:           int64(t.fc.getSize()),
+		SocketOptions:                    channelz.GetSocketOption(t.conn),
+		LocalAddr:                        t.localAddr,
+		RemoteAddr:                       t.remoteAddr,
+		// RemoteName :
+	}
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
+	s.RemoteFlowControlWindow = t.getOutFlowWindow()
+	return &s
+}
+
+func (t *http2Server) IncrMsgSent() {
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+}
+
+func (t *http2Server) IncrMsgRecv() {
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+}
+
+func (t *http2Server) getOutFlowWindow() int64 {
+	resp := make(chan uint32, 1)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.done:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+func (t *http2Server) getPeer() *peer.Peer {
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo, // Can be nil
+	}
+}
+
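+// getJitter returns a random offset of roughly +/- 10% of v, used to spread
+// MaxConnectionAge expirations so that connections created together don't
+// all reach their max age at the same instant.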
+func getJitter(v time.Duration) time.Duration {
+	if v == infinity {
+		return 0
+	}
+	// Generate a jitter between +/- 10% of the value.
+	r := int64(v / 10)
+	j := grpcrand.Int63n(2*r) - r
+	return time.Duration(j)
+}
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+	return conn
+}
+
+// setConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func setConnection(ctx context.Context, conn net.Conn) context.Context {
+	return context.WithValue(ctx, connectionKey{}, conn)
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
new file mode 100644
index 000000000..2c601a864
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -0,0 +1,412 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
+	http2MaxFrameLen = 16384 // 16KB frame
+	// https://httpwg.org/specs/rfc7540.html#SettingValues
+	http2InitHeaderTableSize = 4096
+)
+
+var (
+	clientPreface   = []byte(http2.ClientPreface)
+	http2ErrConvTab = map[http2.ErrCode]codes.Code{
+		http2.ErrCodeNo:                 codes.Internal,
+		http2.ErrCodeProtocol:           codes.Internal,
+		http2.ErrCodeInternal:           codes.Internal,
+		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
+		http2.ErrCodeSettingsTimeout:    codes.Internal,
+		http2.ErrCodeStreamClosed:       codes.Internal,
+		http2.ErrCodeFrameSize:          codes.Internal,
+		http2.ErrCodeRefusedStream:      codes.Unavailable,
+		http2.ErrCodeCancel:             codes.Canceled,
+		http2.ErrCodeCompression:        codes.Internal,
+		http2.ErrCodeConnect:            codes.Internal,
+		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
+		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
+	}
+	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
+	HTTPStatusConvTab = map[int]codes.Code{
+		// 400 Bad Request - INTERNAL.
+		http.StatusBadRequest: codes.Internal,
+		// 401 Unauthorized  - UNAUTHENTICATED.
+		http.StatusUnauthorized: codes.Unauthenticated,
+		// 403 Forbidden - PERMISSION_DENIED.
+		http.StatusForbidden: codes.PermissionDenied,
+		// 404 Not Found - UNIMPLEMENTED.
+		http.StatusNotFound: codes.Unimplemented,
+		// 429 Too Many Requests - UNAVAILABLE.
+		http.StatusTooManyRequests: codes.Unavailable,
+		// 502 Bad Gateway - UNAVAILABLE.
+		http.StatusBadGateway: codes.Unavailable,
+		// 503 Service Unavailable - UNAVAILABLE.
+		http.StatusServiceUnavailable: codes.Unavailable,
+		// 504 Gateway timeout - UNAVAILABLE.
+		http.StatusGatewayTimeout: codes.Unavailable,
+	}
+	logger = grpclog.Component("transport")
+)
+
+// isReservedHeader checks whether hdr belongs to the HTTP2 headers reserved
+// by the gRPC protocol. Any other headers are classified as user-specified
+// metadata.
+func isReservedHeader(hdr string) bool {
+	if hdr != "" && hdr[0] == ':' {
+		return true
+	}
+	switch hdr {
+	case "content-type",
+		"user-agent",
+		"grpc-message-type",
+		"grpc-encoding",
+		"grpc-message",
+		"grpc-status",
+		"grpc-timeout",
+		"grpc-status-details-bin",
+		// Intentionally exclude grpc-previous-rpc-attempts and
+		// grpc-retry-pushback-ms, which are "reserved", but their API
+		// intentionally works via metadata.
+		"te":
+		return true
+	default:
+		return false
+	}
+}
+
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
+func isWhitelistedHeader(hdr string) bool {
+	switch hdr {
+	case ":authority", "user-agent":
+		return true
+	default:
+		return false
+	}
+}
+
+const binHdrSuffix = "-bin"
+
+func encodeBinHeader(v []byte) string {
+	return base64.RawStdEncoding.EncodeToString(v)
+}
+
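+// decodeBinHeader accepts both padded and unpadded base64: a value whose
+// length is a multiple of 4 is decoded as standard (possibly padded)
+// encoding, anything else as raw (unpadded) encoding.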
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+func encodeMetadataHeader(k, v string) string {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		return encodeBinHeader(([]byte)(v))
+	}
+	return v
+}
+
+func decodeMetadataHeader(k, v string) (string, error) {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		b, err := decodeBinHeader(v)
+		return string(b), err
+	}
+	return v, nil
+}
+
+func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
+	v, err := decodeBinHeader(rawDetails)
+	if err != nil {
+		return nil, err
+	}
+	st := &spb.Status{}
+	if err = proto.Unmarshal(v, st); err != nil {
+		return nil, err
+	}
+	return status.FromProto(st), nil
+}
+
+type timeoutUnit uint8
+
+const (
+	hour        timeoutUnit = 'H'
+	minute      timeoutUnit = 'M'
+	second      timeoutUnit = 'S'
+	millisecond timeoutUnit = 'm'
+	microsecond timeoutUnit = 'u'
+	nanosecond  timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+	switch u {
+	case hour:
+		return time.Hour, true
+	case minute:
+		return time.Minute, true
+	case second:
+		return time.Second, true
+	case millisecond:
+		return time.Millisecond, true
+	case microsecond:
+		return time.Microsecond, true
+	case nanosecond:
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
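+// decodeTimeout parses a grpc-timeout header value as defined by the gRPC
+// over HTTP/2 spec: at most 8 ASCII digits followed by a single unit letter
+// ('H', 'M', 'S', 'm', 'u', or 'n'). For example, "100m" is 100 milliseconds
+// and "7S" is 7 seconds.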
+func decodeTimeout(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+	}
+	if size > 9 {
+		// Spec allows for 8 digits plus the unit.
+		return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
+	}
+	unit := timeoutUnit(s[size-1])
+	d, ok := timeoutUnitToDuration(unit)
+	if !ok {
+		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	const maxHours = math.MaxInt64 / int64(time.Hour)
+	if d == time.Hour && t > maxHours {
+		// This timeout would overflow math.MaxInt64; clamp it.
+		return time.Duration(math.MaxInt64), nil
+	}
+	return d * time.Duration(t), nil
+}
+
+const (
+	spaceByte   = ' '
+	tildeByte   = '~'
+	percentByte = '%'
+)
+
+// encodeGrpcMessage is used to encode the status message in the header field
+// "grpc-message". It does percent encoding and also replaces invalid utf-8
+// characters with the Unicode replacement character.
+//
+// It checks whether each individual byte in msg is an allowable byte, and
+// then either percent encodes it or passes it through. When percent encoding,
+// the byte is converted into hexadecimal notation with a '%' prepended.
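+//
+// For example, a message made only of printable ASCII (excluding '%') is
+// returned unchanged, while a byte outside that range, such as the UTF-8
+// encoding 0xC3 0xA9 of 'é', is emitted as "%C3%A9".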
+func encodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
+			return encodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func encodeGrpcMessageUnchecked(msg string) string {
+	var sb strings.Builder
+	for len(msg) > 0 {
+		r, size := utf8.DecodeRuneInString(msg)
+		for _, b := range []byte(string(r)) {
+			if size > 1 {
+				// If size > 1, r is not ASCII. Always do percent encoding.
+				fmt.Fprintf(&sb, "%%%02X", b)
+				continue
+			}
+
+			// The for loop is necessary even if size == 1. r could be
+			// utf8.RuneError.
+			//
+			// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
+			if b >= spaceByte && b <= tildeByte && b != percentByte {
+				sb.WriteByte(b)
+			} else {
+				fmt.Fprintf(&sb, "%%%02X", b)
+			}
+		}
+		msg = msg[size:]
+	}
+	return sb.String()
+}
+
+// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
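+// For example, "%C3%A9" decodes back to 'é', while a '%' that is not followed
+// by two valid hex digits is passed through unchanged.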
+func decodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		if msg[i] == percentByte && i+2 < lenMsg {
+			return decodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func decodeGrpcMessageUnchecked(msg string) string {
+	var sb strings.Builder
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if c == percentByte && i+2 < lenMsg {
+			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
+			if err != nil {
+				sb.WriteByte(c)
+			} else {
+				sb.WriteByte(byte(parsed))
+				i += 2
+			}
+		} else {
+			sb.WriteByte(c)
+		}
+	}
+	return sb.String()
+}
+
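+// bufWriter is a simple write buffer: bytes accumulate in buf and are flushed
+// to the underlying conn once at least batchSize bytes are pending. buf is
+// allocated at twice batchSize so a single copy can run past the flush
+// threshold without overflowing.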
+type bufWriter struct {
+	buf       []byte
+	offset    int
+	batchSize int
+	conn      net.Conn
+	err       error
+}
+
+func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
+	return &bufWriter{
+		buf:       make([]byte, batchSize*2),
+		batchSize: batchSize,
+		conn:      conn,
+	}
+}
+
+func (w *bufWriter) Write(b []byte) (n int, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if w.batchSize == 0 { // Buffer has been disabled.
+		return w.conn.Write(b)
+	}
+	for len(b) > 0 {
+		nn := copy(w.buf[w.offset:], b)
+		b = b[nn:]
+		w.offset += nn
+		n += nn
+		if w.offset >= w.batchSize {
+			err = w.Flush()
+		}
+	}
+	return n, err
+}
+
+func (w *bufWriter) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if w.offset == 0 {
+		return nil
+	}
+	_, w.err = w.conn.Write(w.buf[:w.offset])
+	w.offset = 0
+	return w.err
+}
+
+type framer struct {
+	writer *bufWriter
+	fr     *http2.Framer
+}
+
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
+	if writeBufferSize < 0 {
+		writeBufferSize = 0
+	}
+	var r io.Reader = conn
+	if readBufferSize > 0 {
+		r = bufio.NewReaderSize(r, readBufferSize)
+	}
+	w := newBufWriter(conn, writeBufferSize)
+	f := &framer{
+		writer: w,
+		fr:     http2.NewFramer(w, r),
+	}
+	f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
+	// Opt-in to Frame reuse API on framer to reduce garbage.
+	// Frames aren't safe to read from after a subsequent call to ReadFrame.
+	f.fr.SetReuseFrames()
+	f.fr.MaxHeaderListSize = maxHeaderListSize
+	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
+	return f
+}
+
+// parseDialTarget returns the network and address to pass to the dialer.
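+// For example, "unix:addr" yields ("unix", "addr"), URL forms such as
+// "unix:///run/app.sock" yield ("unix", "/run/app.sock"), and anything else
+// falls back to ("tcp", target).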
+func parseDialTarget(target string) (string, string) {
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// Handle unix:addr, which url.Parse would fail to parse.
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 000000000..c11b52782
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. Attribute of a resolver.Address.
+package networktype
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+	address.Attributes = address.Attributes.WithValue(key, networkType)
+	return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+	v := address.Attributes.Value(key)
+	if v == nil {
+		return "", false
+	}
+	return v.(string), true
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 000000000..415961987
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,142 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+	// The following variable will be overwritten in the tests.
+	httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
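+// mapAddress returns the proxy URL to use for the given address, as decided
+// by http.ProxyFromEnvironment (the HTTPS_PROXY, HTTP_PROXY, and NO_PROXY
+// environment variables), or nil if no proxy should be used.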
+func mapAddress(address string) (*url.URL, error) {
+	req := &http.Request{
+		URL: &url.URL{
+			Scheme: "https",
+			Host:   address,
+		},
+	}
+	url, err := httpProxyFromEnvironment(req)
+	if err != nil {
+		return nil, err
+	}
+	return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: backendAddr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	}
+	if t := proxyURL.User; t != nil {
+		u := t.Username()
+		p, _ := t.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
+
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+
+	return &bufConn{Conn: conn, r: r}, nil
+}
+
+// proxyDial dials, connecting to a proxy first if necessary. It checks whether
+// a proxy is necessary, dials, performs the HTTP CONNECT handshake, and returns
+// the connection.
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+	newAddr := addr
+	proxyURL, err := mapAddress(addr)
+	if err != nil {
+		return nil, err
+	}
+	if proxyURL != nil {
+		newAddr = proxyURL.Host
+	}
+
+	conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+	if err != nil {
+		return
+	}
+	if proxyURL != nil {
+		// proxy is disabled if proxyURL is nil.
+		conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+	}
+	return
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
new file mode 100644
index 000000000..0ac77ea4f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -0,0 +1,823 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package transport defines and implements a message-oriented communication
+// channel to complete various transactions (e.g., an RPC).  It is meant for
+// grpc-internal usage and is not intended to be imported directly by users.
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+// ErrNoHeaders is used as a signal that a trailers-only response was received,
+// and is not a real error.
+var ErrNoHeaders = errors.New("stream has no headers")
+
+const logLevel = 2
+
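+// bufferPool is a pool of reusable bytes.Buffers, used to reduce allocations
+// when reading messages off the wire.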
+type bufferPool struct {
+	pool sync.Pool
+}
+
+func newBufferPool() *bufferPool {
+	return &bufferPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(bytes.Buffer)
+			},
+		},
+	}
+}
+
+func (p *bufferPool) get() *bytes.Buffer {
+	return p.pool.Get().(*bytes.Buffer)
+}
+
+func (p *bufferPool) put(b *bytes.Buffer) {
+	p.pool.Put(b)
+}
+
+// recvMsg represents a message received from the transport. All transport
+// protocol-specific info has been removed.
+type recvMsg struct {
+	buffer *bytes.Buffer
+	// nil: received some data
+	// io.EOF: stream is completed. data is nil.
+	// other non-nil error: transport failure. data is nil.
+	err error
+}
+
+// recvBuffer is an unbounded channel of recvMsg structs.
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
+type recvBuffer struct {
+	c       chan recvMsg
+	mu      sync.Mutex
+	backlog []recvMsg
+	err     error
+}
+
+func newRecvBuffer() *recvBuffer {
+	b := &recvBuffer{
+		c: make(chan recvMsg, 1),
+	}
+	return b
+}
+
+func (b *recvBuffer) put(r recvMsg) {
+	b.mu.Lock()
+	if b.err != nil {
+		b.mu.Unlock()
+		// An error had occurred earlier, don't accept more
+		// data or errors.
+		return
+	}
+	b.err = r.err
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+	b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = recvMsg{}
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+	return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+	ctx         context.Context
+	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
+	recv        *recvBuffer
+	last        *bytes.Buffer // Stores the remaining data in the previous calls.
+	err         error
+	freeBuffer  func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.last != nil {
+		// Read remaining data left in last call.
+		copied, _ := r.last.Read(p)
+		if r.last.Len() == 0 {
+			r.freeBuffer(r.last)
+			r.last = nil
+		}
+		return copied, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readClient(p)
+	} else {
+		n, r.err = r.read(p)
+	}
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+	// If the context is canceled, the stream is closed with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, p)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		return 0, m.err
+	}
+	copied, _ := m.buffer.Read(p)
+	if m.buffer.Len() == 0 {
+		r.freeBuffer(m.buffer)
+		r.last = nil
+	} else {
+		r.last = m.buffer
+	}
+	return copied, nil
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone             // EndStream sent
+	streamReadDone              // EndStream received
+	streamDone                  // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ct           *http2Client       // nil for server side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     io.Reader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Callback to signal the application's intention to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).  Not valid on server side.
+	headerValid bool
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return
+	}
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if no compression is applied.
+func (s *Stream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+	s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called.  It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+
+	if !s.headerValid {
+		return nil, s.status.Err()
+	}
+
+	if s.noHeaders {
+		return nil, ErrNoHeaders
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only.  If the stream ends
+// before headers are received, returns true.  Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// read or write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase.  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads len(p) bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandlers         []stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	ChannelzParentID      *channelz.Identifier
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to set up a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
+	// KeepaliveParams stores the keepalive parameters.
+	KeepaliveParams keepalive.ClientParameters
+	// StatsHandlers stores the handler for stats.
+	StatsHandlers []stats.Handler
+	// InitialWindowSize sets the initial window size for a stream.
+	InitialWindowSize int32
+	// InitialConnWindowSize sets the initial window size for a connection.
+	InitialConnWindowSize int32
+	// WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
+	WriteBufferSize int
+	// ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
+	ReadBufferSize int
+	// ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
+	ChannelzParentID *channelz.Identifier
+	// MaxHeaderListSize sets the max (uncompressed) size of the header list that is prepared to be received.
+	MaxHeaderListSize *uint32
+	// UseProxy specifies if a proxy should be used.
+	UseProxy bool
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this write is the last piece for
+	// this stream.
+	Last bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	// Host specifies the peer's host.
+	Host string
+
+	// Method specifies the operation to perform.
+	Method string
+
+	// SendCompress specifies the compression algorithm applied on the
+	// outbound message.
+	SendCompress string
+
+	// Creds specifies credentials.PerRPCCredentials for a call.
+	Creds credentials.PerRPCCredentials
+
+	// ContentSubtype specifies the content-subtype for a request. For example, a
+	// content-subtype of "proto" will result in a content-type of
+	// "application/grpc+proto". The value of ContentSubtype must be all
+	// lowercase, otherwise the behavior is undefined. See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	ContentSubtype string
+
+	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+	DoneFunc func() // called when the stream is finished
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close(err error)
+
+	// GracefulClose starts to tear down the transport: the transport will stop
+	// accepting new RPCs and NewStream will return an error. Once all streams are
+	// finished, the transport will close.
+	//
+	// It does not block.
+	GracefulClose()
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in the error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+
+	// GetGoAwayReason returns the reason why the GoAway frame was received, along
+	// with a human-readable string with debug info.
+	GetGoAwayReason() (GoAwayReason, string)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client.  WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close(err error)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// Drain notifies the client that this ServerTransport stops accepting new RPCs.
+	Drain()
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate to the
+	// application layer that the stream is done.
+	errStreamDone = errors.New("the stream is done")
+	// StatusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame was received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when a GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz-related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since
+// performing atomic operations on an int64 variable on a 32-bit machine requires the
+// user to enforce memory alignment. Here, by grouping those int64 fields inside a
+// struct, we are enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// EoS bit set frame from server.
+	// Server side: The number of streams that have ended successfully by sending
+	// frame with EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp at which the last stream was created. It is of int64
+	// type instead of time.Time since it's more costly to atomically update a time.Time variable than
+	// an int64 variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
new file mode 100644
index 000000000..e8b492774
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package internal
+
+import (
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/resolver"
+)
+
+// handshakeClusterNameKey is the type used as the key to store cluster name in
+// the Attributes field of resolver.Address.
+type handshakeClusterNameKey struct{}
+
+// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
+// is updated with the cluster name.
+func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
+	return addr
+}
+
+// GetXDSHandshakeClusterName returns cluster name stored in attr.
+func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
+	v := attr.Value(handshakeClusterNameKey{})
+	name, ok := v.(string)
+	return name, ok
+}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 000000000..34d31b5e7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package keepalive defines configurable parameters for point-to-point
+// health checks.
+package keepalive
+
+import (
+	"time"
+)
+
+// ClientParameters is used to set keepalive parameters on the client-side.
+// These configure how the client will actively probe to notice when a
+// connection is broken and send pings so intermediaries will be aware of the
+// liveness of the connection. Make sure these parameters are set in
+// coordination with the keepalive policy on the server, as incompatible
+// settings can result in the connection being closed.
+type ClientParameters struct {
+	// After a duration of this time, if the client doesn't see any activity, it
+	// pings the server to see if the transport is still alive.
+	// If set below 10s, a minimum value of 10s will be used instead.
+	Time time.Duration // The current default value is infinity.
+	// After having pinged for a keepalive check, the client waits for a duration
+	// of Timeout, and if no activity is seen even after that, the connection is
+	// closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+	// If true, the client sends keepalive pings even with no active RPCs. If
+	// false, when there are no active RPCs, Time and Timeout will be ignored and
+	// no keepalive pings will be sent.
+	PermitWithoutStream bool // false by default.
+}
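+
+// A minimal dialing sketch (assumed usage, not part of the upstream file):
+// ClientParameters is passed via the grpc.WithKeepaliveParams dial option;
+// credential options are elided here.
+//
+//	conn, err := grpc.Dial(target,
+//		grpc.WithKeepaliveParams(keepalive.ClientParameters{
+//			Time:                30 * time.Second, // ping after 30s of inactivity
+//			Timeout:             20 * time.Second, // wait up to 20s for a ping ack
+//			PermitWithoutStream: true,             // ping even without active RPCs
+//		}))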
+
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
+type ServerParameters struct {
+	// MaxConnectionIdle is a duration for the amount of time after which an
+	// idle connection would be closed by sending a GoAway. Idleness duration is
+	// measured from the most recent time the number of outstanding RPCs became
+	// zero or from connection establishment.
+	MaxConnectionIdle time.Duration // The current default value is infinity.
+	// MaxConnectionAge is a duration for the maximum amount of time a
+	// connection may exist before it will be closed by sending a GoAway. A
+	// random jitter of +/-10% will be added to MaxConnectionAge to spread out
+	// connection storms.
+	MaxConnectionAge time.Duration // The current default value is infinity.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+	// which the connection will be forcibly closed.
+	MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+	// After a duration of this time, if the server doesn't see any activity, it
+	// pings the client to see if the transport is still alive.
+	// If set below 1s, a minimum value of 1s will be used instead.
+	Time time.Duration // The current default value is 2 hours.
+	// After having pinged for a keepalive check, the server waits for a duration
+	// of Timeout, and if no activity is seen even after that, the connection is
+	// closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. The server will close the connection with a client that
+// violates this policy.
+type EnforcementPolicy struct {
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
+	MinTime time.Duration // The current default value is 5 minutes.
+	// If true, the server allows keepalive pings even when there are no active
+	// streams (RPCs). If false, and the client sends a ping when there are no
+	// active streams, the server will send a GOAWAY and close the connection.
+	PermitWithoutStream bool // false by default.
+}
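+
+// A minimal server-side sketch (assumed usage, not part of the upstream file):
+// ServerParameters and EnforcementPolicy are installed through the
+// grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy server options.
+//
+//	srv := grpc.NewServer(
+//		grpc.KeepaliveParams(keepalive.ServerParameters{
+//			MaxConnectionIdle: 5 * time.Minute, // close idle connections with a GoAway
+//		}),
+//		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+//			MinTime: 5 * time.Minute, // reject pings arriving more often than this
+//		}),
+//	)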
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 000000000..fb4a88f59
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,288 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := make(MD, len(m))
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := make(MD, len(kv)/2)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
+	}
+	return md
+}
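+
+// Illustrative sketch (assumed usage, not part of the upstream file): both
+// constructors normalize keys to lowercase, so the following are equivalent.
+//
+//	md := Pairs("Authorization", "Bearer abc")
+//	md = New(map[string]string{"authorization": "Bearer abc"})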
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	return Join(md)
+}
+
+// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+	k = strings.ToLower(k)
+	delete(md, k)
+}
+
+// Join joins any number of mds into a single MD.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
+func Join(mds ...MD) MD {
+	out := MD{}
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return out
+}
+
+type mdIncomingKey struct{}
+type mdOutgoingKey struct{}
+
+// NewIncomingContext creates a new context with incoming md attached.
+func NewIncomingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdIncomingKey{}, md)
+}
+
+// NewOutgoingContext creates a new context with outgoing md attached. If used
+// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
+// overwrite any previously-appended metadata.
+func NewOutgoingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
+}
+
+// AppendToOutgoingContext returns a new context with the provided kv merged
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
+	added := make([][]string, len(md.added)+1)
+	copy(added, md.added)
+	added[len(added)-1] = make([]string, len(kv))
+	copy(added[len(added)-1], kv)
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
+}
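+
+// Illustrative sketch (assumed usage, not part of the upstream file): appended
+// pairs are merged with, rather than replacing, metadata already in ctx.
+//
+//	ctx := AppendToOutgoingContext(context.Background(), "k1", "v1")
+//	ctx = AppendToOutgoingContext(ctx, "k1", "v2")
+//	md, _ := FromOutgoingContext(ctx) // md["k1"] == []string{"v1", "v2"}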
+
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil, false
+	}
+	out := make(MD, len(md))
+	for k, v := range md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	return out, true
+}
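+
+// A server-side sketch (assumed usage, not part of the upstream file):
+//
+//	md, ok := FromIncomingContext(ctx)
+//	if ok {
+//		vals := md.Get("authorization") // keys in the returned MD are lowercase
+//	}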
+
+// ValueFromIncomingContext returns the metadata value corresponding to the metadata
+// key from the incoming metadata if it exists. Key must be lower-case.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil
+	}
+
+	if v, ok := md[key]; ok {
+		return copyOf(v)
+	}
+	for k, v := range md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		if strings.ToLower(k) == key {
+			return copyOf(v)
+		}
+	}
+	return nil
+}
+
+// the returned slice must not be modified in place
+func copyOf(v []string) []string {
+	vals := make([]string, len(v))
+	copy(vals, v)
+	return vals
+}
+
+// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+//
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+//
+// This is intended for gRPC-internal use ONLY. Users should use
+// FromOutgoingContext instead.
+func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, nil, false
+	}
+
+	return raw.md, raw.added, true
+}
+
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromOutgoingContext(ctx context.Context) (MD, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, false
+	}
+
+	mdSize := len(raw.md)
+	for i := range raw.added {
+		mdSize += len(raw.added[i]) / 2
+	}
+
+	out := make(MD, mdSize)
+	for k, v := range raw.md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
+	}
+	return out, ok
+}
+
+type rawMD struct {
+	md    MD
+	added [][]string
+}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 000000000..e01d219ff
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utilities.
+package peer
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
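+
+// Illustrative sketch (assumed usage, not part of the upstream file): inside a
+// server handler, the peer can be recovered from the RPC context.
+//
+//	if p, ok := FromContext(ctx); ok {
+//		log.Printf("request from %v", p.Addr)
+//	}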
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 000000000..c525dc070
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/status"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	mu         sync.Mutex
+	done       bool
+	blockingCh chan struct{}
+	picker     balancer.Picker
+}
+
+func newPickerWrapper() *pickerWrapper {
+	return &pickerWrapper{blockingCh: make(chan struct{})}
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	pw.mu.Lock()
+	if pw.done {
+		pw.mu.Unlock()
+		return
+	}
+	pw.picker = p
+	// pw.blockingCh should never be nil.
+	close(pw.blockingCh)
+	pw.blockingCh = make(chan struct{})
+	pw.mu.Unlock()
+}
+
+// doneChannelzWrapper performs the following:
+//   - increments the calls started channelz counter
+//   - wraps the done function in the passed in result to increment the calls
+//     failed or calls succeeded channelz counter before invoking the actual
+//     done function.
+func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
+	acw.mu.Lock()
+	ac := acw.ac
+	acw.mu.Unlock()
+	ac.incrCallsStarted()
+	done := result.Done
+	result.Done = func(b balancer.DoneInfo) {
+		if b.Err != nil && b.Err != io.EOF {
+			ac.incrCallsFailed()
+		} else {
+			ac.incrCallsSucceeded()
+		}
+		if done != nil {
+			done(b)
+		}
+	}
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
+	var ch chan struct{}
+
+	var lastPickErr error
+	for {
+		pw.mu.Lock()
+		if pw.done {
+			pw.mu.Unlock()
+			return nil, balancer.PickResult{}, ErrClientConnClosing
+		}
+
+		if pw.picker == nil {
+			ch = pw.blockingCh
+		}
+		if ch == pw.blockingCh {
+			// This could happen when either:
+			// - pw.picker is nil (the previous if condition), or
+			// - pick has already been called on the current picker.
+			pw.mu.Unlock()
+			select {
+			case <-ctx.Done():
+				var errStr string
+				if lastPickErr != nil {
+					errStr = "latest balancer error: " + lastPickErr.Error()
+				} else {
+					errStr = ctx.Err().Error()
+				}
+				switch ctx.Err() {
+				case context.DeadlineExceeded:
+					return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
+				case context.Canceled:
+					return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
+				}
+			case <-ch:
+			}
+			continue
+		}
+
+		ch = pw.blockingCh
+		p := pw.picker
+		pw.mu.Unlock()
+
+		pickResult, err := p.Pick(info)
+		if err != nil {
+			if err == balancer.ErrNoSubConnAvailable {
+				continue
+			}
+			if st, ok := status.FromError(err); ok {
+				// Status error: end the RPC unconditionally with this status.
+				// First restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
+				}
+				return nil, balancer.PickResult{}, dropError{error: err}
+			}
+			// For all other errors, wait-for-ready RPCs should block and other
+			// RPCs should fail with unavailable.
+			if !failfast {
+				lastPickErr = err
+				continue
+			}
+			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
+		}
+
+		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
+		if !ok {
+			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
+			continue
+		}
+		if t := acw.getAddrConn().getReadyTransport(); t != nil {
+			if channelz.IsOn() {
+				doneChannelzWrapper(acw, &pickResult)
+				return t, pickResult, nil
+			}
+			return t, pickResult, nil
+		}
+		if pickResult.Done != nil {
+			// Calling done with nil error, no bytes sent and no bytes received.
+			// DoneInfo with default value works.
+			pickResult.Done(balancer.DoneInfo{})
+		}
+		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		// If the transport is nil, ac.state is not READY.
+		// A valid picker always returns READY subConn. This means the state of ac
+		// just changed, and picker will be updated shortly.
+		// continue back to the beginning of the for loop to repick.
+	}
+}
+
+func (pw *pickerWrapper) close() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
+		return
+	}
+	pw.done = true
+	close(pw.blockingCh)
+}
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+	error
+}
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
new file mode 100644
index 000000000..fc91b4d26
--- /dev/null
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -0,0 +1,183 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"errors"
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+)
+
+// PickFirstBalancerName is the name of the pick_first balancer.
+const PickFirstBalancerName = "pick_first"
+
+func newPickfirstBuilder() balancer.Builder {
+	return &pickfirstBuilder{}
+}
+
+type pickfirstBuilder struct{}
+
+func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	return &pickfirstBalancer{cc: cc}
+}
+
+func (*pickfirstBuilder) Name() string {
+	return PickFirstBalancerName
+}
+
+type pickfirstBalancer struct {
+	state   connectivity.State
+	cc      balancer.ClientConn
+	subConn balancer.SubConn
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+	if logger.V(2) {
+		logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
+	}
+	if b.subConn == nil {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	if len(state.ResolverState.Addresses) == 0 {
+		// The resolver reported an empty address list. Treat it like an error by
+		// calling b.ResolverError.
+		if b.subConn != nil {
+			// Remove the old subConn. All addresses were removed, so it is no longer
+			// valid.
+			b.cc.RemoveSubConn(b.subConn)
+			b.subConn = nil
+		}
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+
+	if b.subConn != nil {
+		b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
+		return nil
+	}
+
+	subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
+	if err != nil {
+		if logger.V(2) {
+			logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+		}
+		b.state = connectivity.TransientFailure
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
+		})
+		return balancer.ErrBadResolverState
+	}
+	b.subConn = subConn
+	b.state = connectivity.Idle
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.Connecting,
+		Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+	})
+	b.subConn.Connect()
+	return nil
+}
+
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	if logger.V(2) {
+		logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state)
+	}
+	if b.subConn != subConn {
+		if logger.V(2) {
+			logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized")
+		}
+		return
+	}
+	b.state = state.ConnectivityState
+	if state.ConnectivityState == connectivity.Shutdown {
+		b.subConn = nil
+		return
+	}
+
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
+		})
+	case connectivity.Connecting:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	case connectivity.Idle:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &idlePicker{subConn: subConn},
+		})
+	case connectivity.TransientFailure:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: state.ConnectionError},
+		})
+	}
+}
+
+func (b *pickfirstBalancer) Close() {
+}
+
+func (b *pickfirstBalancer) ExitIdle() {
+	if b.subConn != nil && b.state == connectivity.Idle {
+		b.subConn.Connect()
+	}
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	subConn balancer.SubConn
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.subConn.Connect()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+func init() {
+	balancer.Register(newPickfirstBuilder())
+}
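+
+// Editor's note — the sketch below is an illustrative addition, not part of
+// the upstream file. pick_first is the default policy, so a plain Dial
+// exercises the balancer above; the target address and insecure credentials
+// are placeholders.
+package main
+
+import (
+	"log"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func main() {
+	// With no service config, the ClientConn uses pick_first: it connects to
+	// the first reachable resolved address and sends every RPC on that single
+	// connection.
+	conn, err := grpc.Dial("dns:///example.com:50051",
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		log.Fatalf("dial: %v", err)
+	}
+	defer conn.Close()
+}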
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
new file mode 100644
index 000000000..cd4554785
--- /dev/null
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a marshalled and compressed message.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PreparedMsg struct {
+	// Fields prepared for the message before it is sent.
+	encodedData []byte
+	hdr         []byte
+	payload     []byte
+}
+
+// Encode marshals and compresses the message using the codec and compressor for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// check if the context has the relevant information to prepareMsg
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// prepare the msg
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+	p.encodedData = data
+	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+	if err != nil {
+		return err
+	}
+	p.hdr, p.payload = msgHeader(data, compData)
+	return nil
+}
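+
+// Editor's note — a hedged sketch of the experimental PreparedMsg flow above,
+// added for illustration; `stream` stands for any established
+// grpc.ClientStream and `req` for a message of the stream's request type.
+package example
+
+import "google.golang.org/grpc"
+
+func sendPrepared(stream grpc.ClientStream, req interface{}) error {
+	prep := &grpc.PreparedMsg{}
+	// Marshal and compress once, using the stream's codec and compressor.
+	if err := prep.Encode(stream, req); err != nil {
+		return err
+	}
+	// SendMsg recognizes *PreparedMsg and reuses the already-encoded bytes
+	// instead of marshalling the message again.
+	return stream.SendMsg(prep)
+}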
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
new file mode 100644
index 000000000..a6f26c8ab
--- /dev/null
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu -o pipefail
+
+WORKDIR=$(mktemp -d)
+
+function finish {
+  rm -rf "$WORKDIR"
+}
+trap finish EXIT
+
+export GOBIN=${WORKDIR}/bin
+export PATH=${GOBIN}:${PATH}
+mkdir -p ${GOBIN}
+
+echo "remove existing generated files"
+# grpc_testing_not_regenerate/*.pb.go is not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
+
+echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
+(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
+
+echo "go install cmd/protoc-gen-go-grpc"
+(cd cmd/protoc-gen-go-grpc && go install .)
+
+echo "git clone https://github.com/grpc/grpc-proto"
+git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
+
+echo "git clone https://github.com/protocolbuffers/protobuf"
+git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
+
+# Pull in code.proto as a proto dependency
+mkdir -p ${WORKDIR}/googleapis/google/rpc
+echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
+curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
+
+mkdir -p ${WORKDIR}/out
+
+# Generates sources without the embed requirement
+LEGACY_SOURCES=(
+  ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
+  ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
+  ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
+  ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
+  profiling/proto/service.proto
+  ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
+  ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
+)
+
+# Generates only the new gRPC Service symbols
+SOURCES=(
+  $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
+  ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
+  ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
+  ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
+  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
+  ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
+  ${WORKDIR}/grpc-proto/grpc/testing/*.proto
+  ${WORKDIR}/grpc-proto/grpc/core/*.proto
+)
+
+# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
+# import path of 'bar' in the generated code when 'foo.proto' is imported in
+# one of the sources.
+#
+# Note that the protos listed here are all for testing purposes. All protos to
+# be used externally should have a go_package option (and they don't need to be
+# listed here).
+OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
+Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
+
+for src in ${SOURCES[@]}; do
+  echo "protoc ${src}"
+  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
+    -I"." \
+    -I${WORKDIR}/grpc-proto \
+    -I${WORKDIR}/googleapis \
+    -I${WORKDIR}/protobuf/src \
+    ${src}
+done
+
+for src in ${LEGACY_SOURCES[@]}; do
+  echo "protoc ${src}"
+  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
+    -I"." \
+    -I${WORKDIR}/grpc-proto \
+    -I${WORKDIR}/googleapis \
+    -I${WORKDIR}/protobuf/src \
+    ${src}
+done
+
+# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
+# current location. Move it into the right place.
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+
+# grpc_testing_not_regenerate/*.pb.go are not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
+
+cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 000000000..efcb7f3ef
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,138 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+type addressMapEntry struct {
+	addr  Address
+	value interface{}
+}
+
+// AddressMap is a map of addresses to arbitrary values taking into account
+// Attributes.  BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently.  Must be created via
+// NewAddressMap; do not construct directly.
+type AddressMap struct {
+	// The underlying map is keyed by an Address with fields that we don't care
+	// about being set to their zero values. The only fields that we care about
+	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+	// distinguish between addresses with the same `Addr` and `ServerName`, but
+	// different `Attributes`, we cannot store the `Attributes` in the map key.
+	//
+	// The comparison operation for structs works as follows:
+	//  Struct values are comparable if all their fields are comparable. Two
+	//  struct values are equal if their corresponding non-blank fields are equal.
+	//
+	// The value type of the map contains a slice of addresses which match the key
+	// in their `Addr` and `ServerName` fields and contain the corresponding value
+	// associated with them.
+	m map[Address]addressMapEntryList
+}
+
+func toMapKey(addr *Address) Address {
+	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
+}
+
+type addressMapEntryList []*addressMapEntry
+
+// NewAddressMap creates a new AddressMap.
+func NewAddressMap() *AddressMap {
+	return &AddressMap{m: make(map[Address]addressMapEntryList)}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList) find(addr Address) int {
+	for i, entry := range l {
+		// Attributes are the only thing to match on here, since `Addr` and
+		// `ServerName` are already equal.
+		if entry.addr.Attributes.Equal(addr.Attributes) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Get returns the value for the address in the map, if present.
+func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		return entryList[entry].value, true
+	}
+	return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMap) Set(addr Address, value interface{}) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		entryList[entry].value = value
+		return
+	}
+	a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMap) Delete(addr Address) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	entry := entryList.find(addr)
+	if entry == -1 {
+		return
+	}
+	if len(entryList) == 1 {
+		entryList = nil
+	} else {
+		copy(entryList[entry:], entryList[entry+1:])
+		entryList = entryList[:len(entryList)-1]
+	}
+	a.m[addrKey] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMap) Len() int {
+	ret := 0
+	for _, entryList := range a.m {
+		ret += len(entryList)
+	}
+	return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMap) Keys() []Address {
+	ret := make([]Address, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.addr)
+		}
+	}
+	return ret
+}
+
+// Values returns a slice of all current map values.
+func (a *AddressMap) Values() []interface{} {
+	ret := make([]interface{}, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.value)
+		}
+	}
+	return ret
+}
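+
+// Editor's note — an illustrative sketch of AddressMap semantics, assuming
+// the two-argument attributes.New of recent grpc-go versions; the attribute
+// key "zone" and the string values are placeholders. Two addresses with the
+// same Addr but different Attributes are distinct entries.
+package main
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/resolver"
+)
+
+func main() {
+	m := resolver.NewAddressMap()
+	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
+	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}
+	m.Set(a1, "conn-a")
+	m.Set(a2, "conn-b")
+	fmt.Println(m.Len()) // 2: same Addr, different Attributes
+	if v, ok := m.Get(a1); ok {
+		fmt.Println(v) // conn-a
+	}
+}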
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 000000000..654e9ce69
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,308 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+import (
+	"context"
+	"net"
+	"net/url"
+	"strings"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The initial
+// default scheme is "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
+	Backend AddressType = iota
+	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: to select the GRPCLB load balancing policy, use a service
+	// config with a corresponding loadBalancingConfig.  To supply balancer
+	// addresses to the GRPCLB load balancing policy, set State.Attributes
+	// using balancer/grpclb/state.Set.
+	GRPCLB
+)
+
+// Address represents a server the client connects to.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+
+	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
+	//
+	// If Type is GRPCLB, ServerName should be the name of the remote load
+	// balancer, not the name of the backend.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
+	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the SubConn.
+	Attributes *attributes.Attributes
+
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy.  These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	BalancerAttributes *attributes.Attributes
+
+	// Type is the type of this address.
+	//
+	// Deprecated: use Attributes instead.
+	Type AddressType
+
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	//
+	// Deprecated: use Attributes instead.
+	Metadata interface{}
+}
+
+// Equal returns whether a and o are identical.  Metadata is compared directly,
+// not with any recursive introspection.
+func (a Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Type == o.Type && a.Metadata == o.Metadata
+}
+
+// String returns JSON formatted string representation of the address.
+func (a Address) String() string {
+	return pretty.ToJSON(a)
+}
+
+// BuildOptions includes additional information for the builder to create
+// the resolver.
+type BuildOptions struct {
+	// DisableServiceConfig indicates whether a resolver implementation should
+	// fetch service config data.
+	DisableServiceConfig bool
+	// DialCreds is the transport credentials used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithTransportCredentials). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithCredentialsBundle). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer used by the ClientConn for dialling the
+	// target gRPC service (set via WithDialer). In cases where a name
+	// resolution service requires the same dialer, the resolver may use this
+	// field. In most cases though, it is not appropriate, and this field may
+	// be ignored.
+	Dialer func(context.Context, string) (net.Conn, error)
+}
+
+// State contains the current Resolver state relevant to the ClientConn.
+type State struct {
+	// Addresses is the latest set of resolved addresses for the target.
+	Addresses []Address
+
+	// ServiceConfig contains the result from parsing the latest service
+	// config.  If it is nil, it indicates no service config is present or the
+	// resolver does not provide service configs.
+	ServiceConfig *serviceconfig.ParseResult
+
+	// Attributes contains arbitrary data about the resolver intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
+}
+
+// ClientConn contains the callbacks for resolver to notify any updates
+// to the gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+	// UpdateState updates the state of the ClientConn appropriately.
+	UpdateState(State) error
+	// ReportError notifies the ClientConn that the Resolver encountered an
+	// error.  The ClientConn will notify the load balancer and begin calling
+	// ResolveNow on the Resolver with exponential backoff.
+	ReportError(error)
+	// NewAddress is called by the resolver to notify the ClientConn of a new
+	// list of resolved addresses.
+	// The address list should be the complete list of resolved addresses.
+	//
+	// Deprecated: Use UpdateState instead.
+	NewAddress(addresses []Address)
+	// NewServiceConfig is called by the resolver to notify the ClientConn of a
+	// new service config. The service config should be provided as a JSON string.
+	//
+	// Deprecated: Use UpdateState instead.
+	NewServiceConfig(serviceConfig string)
+	// ParseServiceConfig parses the provided service config and returns an
+	// object that provides the parsed config.
+	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
+}
+
+// Target represents a target for gRPC, as specified in:
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user; gRPC then passes it to the resolver and the balancer.
+//
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
+//
+// Examples:
+//
+//   - "dns://some_authority/foo.bar"
+//     Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+//   - "foo.bar"
+//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
+//   - "unknown_scheme://authority/endpoint"
+//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
+type Target struct {
+	// Deprecated: use URL.Scheme instead.
+	Scheme string
+	// Deprecated: use URL.Host instead.
+	Authority string
+	// URL contains the parsed dial target with an optional default scheme added
+	// to it if the original dial target contained no scheme or contained an
+	// unregistered scheme. Any query params specified in the original dial
+	// target can be accessed from here.
+	URL url.URL
+}
+
+// Endpoint retrieves the endpoint without the leading "/" from either
+// `URL.Path` or `URL.Opaque`. The latter is used when the former is empty.
+func (t Target) Endpoint() string {
+	endpoint := t.URL.Path
+	if endpoint == "" {
+		endpoint = t.URL.Opaque
+	}
+	// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+	// value returned from url.Parse() contains a leading "/". Although this is
+	// in accordance with RFC 3986, we do not want to break existing resolver
+	// implementations which expect the endpoint without the leading "/". So, we
+	// end up stripping the leading "/" here. But this will result in an
+	// incorrect parsing for something like "unix:///path/to/socket". Since we
+	// own the "unix" resolver, we can work around this in the unix resolver by
+	// using the `URL` field.
+	return strings.TrimPrefix(endpoint, "/")
+}
+
+// Builder creates a resolver that will be used to watch name resolution updates.
+type Builder interface {
+	// Build creates a new resolver for the given target.
+	//
+	// gRPC dial calls Build synchronously, and fails if the returned error is
+	// not nil.
+	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
+	// Scheme returns the scheme supported by this resolver.
+	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+	Scheme() string
+}
+
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
+
+// Resolver watches for the updates on the specified target.
+// Updates include address updates and service config updates.
+type Resolver interface {
+	// ResolveNow will be called by gRPC to try to resolve the target name
+	// again. It's just a hint; the resolver can ignore it if it's not necessary.
+	//
+	// It could be called multiple times concurrently.
+	ResolveNow(ResolveNowOptions)
+	// Close closes the resolver.
+	Close()
+}
+
+// UnregisterForTesting removes the resolver builder with the given scheme from the
+// resolver map.
+// This function is for testing only.
+func UnregisterForTesting(scheme string) {
+	delete(m, scheme)
+}
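+
+// Editor's note — a hedged sketch of a minimal static resolver built against
+// the interfaces above; the scheme "static" and the hard-coded address are
+// placeholders, and a real implementation would watch DNS, a file, or a
+// control plane instead of reporting a fixed list.
+package example
+
+import "google.golang.org/grpc/resolver"
+
+type staticBuilder struct{}
+
+func (staticBuilder) Scheme() string { return "static" }
+
+func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	r := &staticResolver{cc: cc}
+	r.ResolveNow(resolver.ResolveNowOptions{}) // push the initial address list
+	return r, nil
+}
+
+type staticResolver struct{ cc resolver.ClientConn }
+
+func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
+	// Report a fixed address list; each call to UpdateState hands the latest
+	// state to the ClientConn, which forwards it to the balancer.
+	r.cc.UpdateState(resolver.State{
+		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
+	})
+}
+
+func (*staticResolver) Close() {}
+
+func init() {
+	// Makes targets of the form "static:///whatever" use this resolver.
+	resolver.Register(staticBuilder{})
+}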
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
new file mode 100644
index 000000000..05a9d4e0b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -0,0 +1,176 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements the resolver.ClientConn interface.
+type ccResolverWrapper struct {
+	cc         *ClientConn
+	resolverMu sync.Mutex
+	resolver   resolver.Resolver
+	done       *grpcsync.Event
+	curState   resolver.State
+
+	incomingMu sync.Mutex // Synchronizes all the incoming calls.
+}
+
+// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
+// returns a ccResolverWrapper object which wraps the newly built resolver.
+func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
+	ccr := &ccResolverWrapper{
+		cc:   cc,
+		done: grpcsync.NewEvent(),
+	}
+
+	var credsClone credentials.TransportCredentials
+	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+		credsClone = creds.Clone()
+	}
+	rbo := resolver.BuildOptions{
+		DisableServiceConfig: cc.dopts.disableServiceConfig,
+		DialCreds:            credsClone,
+		CredsBundle:          cc.dopts.copts.CredsBundle,
+		Dialer:               cc.dopts.copts.Dialer,
+	}
+
+	var err error
+	// We need to hold the lock here while we assign to the ccr.resolver field
+	// to guard against a data race: the code path
+	// rb.Build --> ccr.ReportError --> ccr.poll --> ccr.resolveNow would end up
+	// accessing ccr.resolver while it is being assigned here.
+	ccr.resolverMu.Lock()
+	defer ccr.resolverMu.Unlock()
+	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
+	if err != nil {
+		return nil, err
+	}
+	return ccr, nil
+}
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+	ccr.resolverMu.Lock()
+	if !ccr.done.HasFired() {
+		ccr.resolver.ResolveNow(o)
+	}
+	ccr.resolverMu.Unlock()
+}
+
+func (ccr *ccResolverWrapper) close() {
+	ccr.resolverMu.Lock()
+	ccr.resolver.Close()
+	ccr.done.Fire()
+	ccr.resolverMu.Unlock()
+}
+
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
+	if ccr.done.HasFired() {
+		return nil
+	}
+	ccr.addChannelzTraceEvent(s)
+	ccr.curState = s
+	if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+		return balancer.ErrBadResolverState
+	}
+	return nil
+}
+
+func (ccr *ccResolverWrapper) ReportError(err error) {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
+	if ccr.done.HasFired() {
+		return
+	}
+	channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+	ccr.cc.updateResolverState(resolver.State{}, err)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
+	if ccr.done.HasFired() {
+		return
+	}
+	ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
+	ccr.curState.Addresses = addrs
+	ccr.cc.updateResolverState(ccr.curState, nil)
+}
+
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+	ccr.incomingMu.Lock()
+	defer ccr.incomingMu.Unlock()
+	if ccr.done.HasFired() {
+		return
+	}
+	channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
+	if ccr.cc.dopts.disableServiceConfig {
+		channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
+		return
+	}
+	scpr := parseServiceConfig(sc)
+	if scpr.Err != nil {
+		channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
+		return
+	}
+	ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
+	ccr.curState.ServiceConfig = scpr
+	ccr.cc.updateResolverState(ccr.curState, nil)
+}
+
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+	return parseServiceConfig(scJSON)
+}
+
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+	var updates []string
+	var oldSC, newSC *ServiceConfig
+	var oldOK, newOK bool
+	if ccr.curState.ServiceConfig != nil {
+		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if s.ServiceConfig != nil {
+		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
+		updates = append(updates, "service config updated")
+	}
+	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+		updates = append(updates, "resolver returned an empty address list")
+	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+		updates = append(updates, "resolver returned new addresses")
+	}
+	channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 000000000..cb7020ebe
--- /dev/null
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,916 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// Compressor defines the interface gRPC uses to compress a message.
+//
+// Deprecated: use package encoding.
+type Compressor interface {
+	// Do compresses p into w.
+	Do(w io.Writer, p []byte) error
+	// Type returns the compression algorithm the Compressor uses.
+	Type() string
+}
+
+type gzipCompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() interface{} {
+				w, err := gzip.NewWriterLevel(io.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	var z *gzip.Reader
+	switch maybeZ := d.pool.Get().(type) {
+	case nil:
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		z = newZ
+	case *gzip.Reader:
+		z = maybeZ
+		if err := z.Reset(r); err != nil {
+			d.pool.Put(z)
+			return nil, err
+		}
+	}
+
+	defer func() {
+		z.Close()
+		d.pool.Put(z)
+	}()
+	return io.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	compressorType        string
+	failFast              bool
+	maxReceiveMessageSize *int
+	maxSendMessageSize    *int
+	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
+	maxRetryRPCBufferSize int
+}
+
+func defaultCallInfo() *callInfo {
+	return &callInfo{
+		failFast:              true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server.  If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed.  after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo, *csAttempt)
+}
+
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error      { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(c *callInfo) error { return nil }
+func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.s.Header()
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(c *callInfo) error { return nil }
+func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.s.Trailer()
+}
+
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(c *callInfo) error { return nil }
+func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.s.Context()); ok {
+		*o.PeerAddr = *x
+	}
+}
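+
+// Editor's note — an illustrative sketch of the Header, Trailer, and Peer
+// call options above; ClientConn.Invoke is used so no generated stub is
+// needed, and the method name and message values are placeholders. A real
+// caller would inspect hdr, trl, and p after the call returns.
+package example
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+)
+
+func callWithMetadata(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
+	var hdr, trl metadata.MD
+	var p peer.Peer
+	// All three options are populated only after the RPC completes.
+	return cc.Invoke(ctx, "/helloworld.Greeter/SayHello", req, resp,
+		grpc.Header(&hdr),  // response header metadata
+		grpc.Trailer(&trl), // response trailer metadata
+		grpc.Peer(&p),      // identity of the server that handled the RPC
+	)
+}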
+
+// WaitForReady configures the action to take when an RPC is attempted on broken
+// connections or unreachable servers. If waitForReady is false and the
+// connection is in the TRANSIENT_FAILURE state, the RPC will fail
+// immediately. Otherwise, the RPC client will block the call until a
+// connection is available (or the call is canceled or times out) and will
+// retry the call if it fails due to a transient error.  gRPC will not retry if
+// data was written to the wire unless the server indicates it did not process
+// the data.  Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+//
+// By default, RPCs don't "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
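+
+// Editor's note — illustrative only: making a single RPC block until a
+// connection is available instead of failing fast; cc, ctx, req, and resp
+// are placeholders as in the previous sketch.
+package example
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+)
+
+func callWaitForReady(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
+	// Fails only once ctx is done (or on a non-transient error), rather than
+	// immediately when the connection is in TRANSIENT_FAILURE.
+	return cc.Invoke(ctx, "/helloworld.Greeter/SayHello", req, resp,
+		grpc.WaitForReady(true))
+}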
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
+func FailFast(failFast bool) CallOption {
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type FailFastCallOption struct {
+	FailFast bool
+}
+
+func (o FailFastCallOption) before(c *callInfo) error {
+	c.failFast = o.FailFast
+	return nil
+}
+func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can receive. If this is not set, gRPC uses the default
+// 4MB.
+func MaxCallRecvMsgSize(bytes int) CallOption {
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
+}
+
+// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
+// size in bytes the client can receive.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxRecvMsgSizeCallOption struct {
+	MaxRecvMsgSize int
+}
+
+func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
+	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
+	return nil
+}
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.
+func MaxCallSendMsgSize(bytes int) CallOption {
+	return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
+}
+
+// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
+// size in bytes the client can send.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxSendMsgSizeCallOption struct {
+	MaxSendMsgSize int
+}
+
+func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
+	c.maxSendMessageSize = &o.MaxSendMsgSize
+	return nil
+}
+func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
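+
+// Editor's note — a sketch raising both per-message size limits for every
+// call on a connection; the 16 MiB figure, target, and insecure credentials
+// are arbitrary examples.
+package example
+
+import (
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func dialWithLargerLimits(target string) (*grpc.ClientConn, error) {
+	return grpc.Dial(target,
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithDefaultCallOptions(
+			grpc.MaxCallRecvMsgSize(16*1024*1024), // default is 4MB
+			grpc.MaxCallSendMsgSize(16*1024*1024), // default is math.MaxInt32
+		),
+	)
+}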
+
+// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
+// for a call.
+func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
+	return PerRPCCredsCallOption{Creds: creds}
+}
+
+// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
+// credentials to use for the call.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PerRPCCredsCallOption struct {
+	Creds credentials.PerRPCCredentials
+}
+
+func (o PerRPCCredsCallOption) before(c *callInfo) error {
+	c.creds = o.Creds
+	return nil
+}
+func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// UseCompressor returns a CallOption which sets the compressor used when
+// sending the request.  If WithCompressor is also set, UseCompressor has
+// higher priority.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func UseCompressor(name string) CallOption {
+	return CompressorCallOption{CompressorType: name}
+}
+
+// CompressorCallOption is a CallOption that indicates the compressor to use.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type CompressorCallOption struct {
+	CompressorType string
+}
+
+func (o CompressorCallOption) before(c *callInfo) error {
+	c.compressorType = o.CompressorType
+	return nil
+}
+func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
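+
+// Editor's note — an illustrative sketch of per-call gzip compression using
+// UseCompressor above; importing the encoding/gzip subpackage registers the
+// compressor, and gzip.Name is the string "gzip". Method and messages are
+// placeholders.
+package example
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/encoding/gzip"
+)
+
+func callCompressed(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
+	// The request is compressed with gzip; the server must have a matching
+	// decompressor registered or it responds with codes.Unimplemented.
+	return cc.Invoke(ctx, "/helloworld.Greeter/SayHello", req, resp,
+		grpc.UseCompressor(gzip.Name))
+}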
+
+// CallContentSubtype returns a CallOption that will set the content-subtype
+// for a call. For example, if content-subtype is "json", the Content-Type over
+// the wire will be "application/grpc+json". The content-subtype is converted
+// to lowercase before being included in Content-Type. See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If ForceCodec is not also used, the content-subtype will be used to look up
+// the Codec to use in the registry controlled by RegisterCodec. See the
+// documentation on RegisterCodec for details on registration. The lookup of
+// content-subtype is case-insensitive. If no such Codec is found, the call
+// will result in an error with code codes.Internal.
+//
+// If ForceCodec is also used, that Codec will be used for all request and
+// response messages, with the content-subtype set to the given contentSubtype
+// here for requests.
+func CallContentSubtype(contentSubtype string) CallOption {
+	return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
+}
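+
+// Editor's note — illustrative only: sending "application/grpc+json" framed
+// messages. This assumes a codec registered under the name "json" (e.g. via
+// encoding.RegisterCodec elsewhere); without one the call fails with
+// codes.Internal, as described above.
+package example
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+)
+
+func callJSON(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
+	return cc.Invoke(ctx, "/helloworld.Greeter/SayHello", req, resp,
+		grpc.CallContentSubtype("json"))
+}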
+
+// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
+// used for marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ContentSubtypeCallOption struct {
+	ContentSubtype string
+}
+
+func (o ContentSubtypeCallOption) before(c *callInfo) error {
+	c.contentSubtype = o.ContentSubtype
+	return nil
+}
+func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// ForceCodec returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodec(codec encoding.Codec) CallOption {
+	return ForceCodecCallOption{Codec: codec}
+}
+
+// ForceCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecCallOption struct {
+	Codec encoding.Codec
+}
+
+func (o ForceCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
+// an encoding.Codec.
+//
+// Deprecated: use ForceCodec instead.
+func CallCustomCodec(codec Codec) CallOption {
+	return CustomCodecCallOption{Codec: codec}
+}
+
+// CustomCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type CustomCodecCallOption struct {
+	Codec Codec
+}
+
+func (o CustomCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
+// used for buffering this RPC's requests for retry purposes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func MaxRetryRPCBufferSize(bytes int) CallOption {
+	return MaxRetryRPCBufferSizeCallOption{bytes}
+}
+
+// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
+// memory to be used for caching this RPC for retry purposes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxRetryRPCBufferSizeCallOption struct {
+	MaxRetryRPCBufferSize int
+}
+
+func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
+	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
+	return nil
+}
+func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+	compressionNone payloadFormat = 0 // no compression
+	compressionMade payloadFormat = 1 // compressed
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+	// r is the underlying reader.
+	// See the comment on recvMsg for the permissible
+	// error types.
+	r io.Reader
+
+	// The header of a gRPC message. Find more detail at
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+	header [5]byte
+}
+
+// recvMsg reads a complete gRPC message from the stream.
+//
+// It returns the message and its payload (compression/encoding)
+// format. The caller owns the returned msg memory.
+//
+// If there is an error, possible values are:
+//   - io.EOF, when no messages remain
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+//
+// No other error values or types may be returned, which also means
+// that the underlying io.Reader must not return an incompatible
+// error.
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
+	if _, err := p.r.Read(p.header[:]); err != nil {
+		return 0, nil, err
+	}
+
+	pf = payloadFormat(p.header[0])
+	length := binary.BigEndian.Uint32(p.header[1:])
+
+	if length == 0 {
+		return pf, nil, nil
+	}
+	if int64(length) > int64(maxInt) {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
+	}
+	if int(length) > maxReceiveMessageSize {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
+	}
+	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
+	// of making it for each message:
+	msg = make([]byte, int(length))
+	if _, err := p.r.Read(msg); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return 0, nil, err
+	}
+	return pf, msg, nil
+}
+
+// encode serializes msg and returns a buffer containing the message, or an
+// error if it is too large to be transmitted by grpc.  If msg is nil, it
+// generates an empty message.
+func encode(c baseCodec, msg interface{}) ([]byte, error) {
+	if msg == nil { // NOTE: typed nils will not be caught by this check
+		return nil, nil
+	}
+	b, err := c.Marshal(msg)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
+	}
+	if uint(len(b)) > math.MaxUint32 {
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+	}
+	return b, nil
+}
+
+// compress returns the input bytes compressed by compressor or cp.  If both
+// compressors are nil, returns nil.
+//
+// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
+func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
+	if compressor == nil && cp == nil {
+		return nil, nil
+	}
+	wrapErr := func(err error) error {
+		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+	}
+	cbuf := &bytes.Buffer{}
+	if compressor != nil {
+		z, err := compressor.Compress(cbuf)
+		if err != nil {
+			return nil, wrapErr(err)
+		}
+		if _, err := z.Write(in); err != nil {
+			return nil, wrapErr(err)
+		}
+		if err := z.Close(); err != nil {
+			return nil, wrapErr(err)
+		}
+	} else {
+		if err := cp.Do(cbuf, in); err != nil {
+			return nil, wrapErr(err)
+		}
+	}
+	return cbuf.Bytes(), nil
+}
+
+const (
+	payloadLen = 1
+	sizeLen    = 4
+	headerLen  = payloadLen + sizeLen
+)
+
+// msgHeader returns a 5-byte header for the message being transmitted and the
+// payload, which is compData if non-nil or data otherwise.
+func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+	hdr = make([]byte, headerLen)
+	if compData != nil {
+		hdr[0] = byte(compressionMade)
+		data = compData
+	} else {
+		hdr[0] = byte(compressionNone)
+	}
+
+	// Write length of payload into buf
+	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
+	return hdr, data
+}
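+
+// Editor's note — a standalone illustration of the 5-byte framing produced
+// by msgHeader and consumed by recvMsg: one flag byte (0 = uncompressed,
+// 1 = compressed) followed by a big-endian uint32 payload length.
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func main() {
+	payload := []byte("hello")
+	hdr := make([]byte, 5)
+	hdr[0] = 0 // compressionNone
+	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
+	frame := append(hdr, payload...)
+	fmt.Printf("flag=%d length=%d body=%q\n",
+		frame[0], binary.BigEndian.Uint32(frame[1:5]), frame[5:])
+}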
+
+func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
+	return &stats.OutPayload{
+		Client:     client,
+		Payload:    msg,
+		Data:       data,
+		Length:     len(data),
+		WireLength: len(payload) + headerLen,
+		SentTime:   t,
+	}
+}
+
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+	switch pf {
+	case compressionNone:
+	case compressionMade:
+		if recvCompress == "" || recvCompress == encoding.Identity {
+			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
+		}
+		if !haveCompressor {
+			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+		}
+	default:
+		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+	}
+	return nil
+}
+
+type payloadInfo struct {
+	wireLength        int // The compressed length received from the wire.
+	uncompressedBytes []byte
+}
+
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
+	pf, d, err := p.recvMsg(maxReceiveMessageSize)
+	if err != nil {
+		return nil, err
+	}
+	if payInfo != nil {
+		payInfo.wireLength = len(d)
+	}
+
+	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+		return nil, st.Err()
+	}
+
+	var size int
+	if pf == compressionMade {
+		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
+		// use this decompressor as the default.
+		if dc != nil {
+			d, err = dc.Do(bytes.NewReader(d))
+			size = len(d)
+		} else {
+			d, size, err = decompress(compressor, d, maxReceiveMessageSize)
+		}
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+		}
+		if size > maxReceiveMessageSize {
+			// TODO: Revisit the error code. Currently keep it consistent with java
+			// implementation.
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+		}
+	}
+	return d, nil
+}
+
+// decompress uses compressor to decompress d, returning the data and its size.
+// If the data would exceed maxReceiveMessageSize, only the size is returned.
+func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
+	dcReader, err := compressor.Decompress(bytes.NewReader(d))
+	if err != nil {
+		return nil, 0, err
+	}
+	if sizer, ok := compressor.(interface {
+		DecompressedSize(compressedBytes []byte) int
+	}); ok {
+		if size := sizer.DecompressedSize(d); size >= 0 {
+			if size > maxReceiveMessageSize {
+				return nil, size, nil
+			}
+			// size is used as an estimate to size the buffer, but we
+			// will read more data if available.
+			// +MinRead so ReadFrom will not reallocate if size is correct.
+			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+			return buf.Bytes(), int(bytesRead), err
+		}
+	}
+	// Read from a LimitReader with limit maxReceiveMessageSize+1. If the
+	// underlying reader exceeds the limit, the result will be larger than
+	// the max, which the caller then reports as an error.
+	d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	return d, len(d), err
+}
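+
+// Illustrative sketch (gzipSizer is a hypothetical type): a compressor can opt
+// in to the fast path above by implementing the optional method checked via
+// the anonymous interface assertion:
+//
+//	func (gzipSizer) DecompressedSize(compressed []byte) int {
+//		// Return the known decompressed size; a negative value means
+//		// "unknown" and falls back to the io.ReadAll path above.
+//		return -1
+//	}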
+
+// At most one of the two compressor parameters (dc and compressor) should be
+// set, but if both are, dc takes precedence.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
+	d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+	if err != nil {
+		return err
+	}
+	if err := c.Unmarshal(d, m); err != nil {
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
+	}
+	if payInfo != nil {
+		payInfo.uncompressedBytes = d
+	}
+	return nil
+}
+
+// rpcInfo holds information about an RPC.
+type rpcInfo struct {
+	failfast      bool
+	preloaderInfo *compressorInfo
+}
+
+// compressorInfo stores the codec and compressors for the preloader.
+// If a stream's context (s.Context) carries an rpcInfo with non-nil pointers
+// to the codec and compressors, a preparedMsg can be used for asynchronous
+// message preparation, and the marshalled bytes can be reused.
+type compressorInfo struct {
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+}
+
+type rpcInfoContextKey struct{}
+
+func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
+	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
+		failfast: failfast,
+		preloaderInfo: &compressorInfo{
+			codec: codec,
+			cp:    cp,
+			comp:  comp,
+		},
+	})
+}
+
+func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
+	s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
+	return
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+//
+// Deprecated: use status.Code instead.
+func Code(err error) codes.Code {
+	return status.Code(err)
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error(), or the empty string when err is nil.
+//
+// Deprecated: use status.Convert and Message method instead.
+func ErrorDesc(err error) string {
+	return status.Convert(err).Message()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+//
+// Deprecated: use status.Errorf instead.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	return status.Errorf(c, format, a...)
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	switch err {
+	case nil, io.EOF:
+		return err
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	case io.ErrUnexpectedEOF:
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	case *transport.NewStreamError:
+		return toRPCErr(e.Err)
+	}
+
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+
+	return status.Error(codes.Unknown, err.Error())
+}
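+
+// Illustrative sketch, restating the mappings above:
+//
+//	status.Code(toRPCErr(context.DeadlineExceeded)) // codes.DeadlineExceeded
+//	status.Code(toRPCErr(io.ErrUnexpectedEOF))      // codes.Internal
+//	status.Code(toRPCErr(errors.New("boom")))       // codes.Unknown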
+
+// setCallInfoCodec should only be called after CallOptions have been applied.
+func setCallInfoCodec(c *callInfo) error {
+	if c.codec != nil {
+		// codec was already set by a CallOption; use it, but set the content
+		// subtype if it is not set.
+		if c.contentSubtype == "" {
+			// c.codec is a baseCodec to hide the difference between grpc.Codec and
+			// encoding.Codec (Name vs. String method name).  We only support
+			// setting content subtype from encoding.Codec to avoid a behavior
+			// change with the deprecated version.
+			if ec, ok := c.codec.(encoding.Codec); ok {
+				c.contentSubtype = strings.ToLower(ec.Name())
+			}
+		}
+		return nil
+	}
+
+	if c.contentSubtype == "" {
+		// No codec specified in CallOptions; use proto by default.
+		c.codec = encoding.GetCodec(proto.Name)
+		return nil
+	}
+
+	// c.contentSubtype is already lowercased in CallContentSubtype
+	c.codec = encoding.GetCodec(c.contentSubtype)
+	if c.codec == nil {
+		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
+	}
+	return nil
+}
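+
+// Illustrative sketch (jsonCodec is a hypothetical encoding.Codec): the lookup
+// above finds codecs registered via encoding.RegisterCodec, keyed by their
+// lowercased Name:
+//
+//	encoding.RegisterCodec(jsonCodec{}) // Name() returns "json"
+//	// A request with content-type "application/grpc+json" then selects it.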
+
+// channelzData is used to store channelz-related data for ClientConn, addrConn and Server.
+// These fields cannot be embedded in the original structs (e.g. ClientConn), because atomic
+// operations on an int64 variable on a 32-bit machine require the user to enforce 64-bit
+// memory alignment. Grouping these int64 fields inside a struct enforces that alignment.
+type channelzData struct {
+	callsStarted   int64
+	callsFailed    int64
+	callsSucceeded int64
+	// lastCallStartedTime stores the timestamp at which the last call started. It is an
+	// int64 rather than a time.Time because atomically updating a time.Time is more
+	// costly than atomically updating an int64.
+	lastCallStartedTime int64
+}
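+
+// Illustrative sketch, assuming Go's usual allocation guarantee: NewServer
+// allocates channelzData with new(), so its first word is 64-bit aligned and
+// the atomic accesses used elsewhere in this file are safe even on 32-bit
+// platforms:
+//
+//	atomic.AddInt64(&s.czData.callsStarted, 1)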
+
+// The SupportPackageIsVersion variables are referenced from generated protocol
+// buffer files to ensure compatibility with the gRPC version used.  The latest
+// support package version is 7.
+//
+// Older versions are kept for compatibility.
+//
+// These constants should not be referenced from any other code.
+const (
+	SupportPackageIsVersion3 = true
+	SupportPackageIsVersion4 = true
+	SupportPackageIsVersion5 = true
+	SupportPackageIsVersion6 = true
+	SupportPackageIsVersion7 = true
+)
+
+const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
new file mode 100644
index 000000000..d5a6e78be
--- /dev/null
+++ b/vendor/google.golang.org/grpc/server.go
@@ -0,0 +1,1971 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/trace"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+const (
+	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultServerMaxSendMessageSize    = math.MaxInt32
+
+	// Server transports are tracked in a map which is keyed on listener
+	// address. For regular gRPC traffic, connections are accepted in Serve()
+	// through a call to Accept(), and we use the actual listener address as key
+	// when we add it to the map. But for connections received through
+	// ServeHTTP(), we do not have a listener and hence use this dummy value.
+	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
+)
+
+func init() {
+	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+		return srv.opts.creds
+	}
+	internal.DrainServerTransports = func(srv *Server, addr string) {
+		srv.drainServerTransports(addr)
+	}
+	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
+		extraServerOptions = append(extraServerOptions, opt...)
+	}
+	internal.ClearGlobalServerOptions = func() {
+		extraServerOptions = nil
+	}
+	internal.BinaryLogger = binaryLogger
+	internal.JoinServerOptions = newJoinServerOption
+}
+
+var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
+
+type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+
+// MethodDesc represents an RPC service's method specification.
+type MethodDesc struct {
+	MethodName string
+	Handler    methodHandler
+}
+
+// ServiceDesc represents an RPC service's specification.
+type ServiceDesc struct {
+	ServiceName string
+	// The pointer to the service interface. Used to check whether the
+	// user-provided implementation satisfies the interface requirements.
+	HandlerType interface{}
+	Methods     []MethodDesc
+	Streams     []StreamDesc
+	Metadata    interface{}
+}
+
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+	// Contains the implementation for the methods in this service.
+	serviceImpl interface{}
+	methods     map[string]*MethodDesc
+	streams     map[string]*StreamDesc
+	mdata       interface{}
+}
+
+type serverWorkerData struct {
+	st     transport.ServerTransport
+	wg     *sync.WaitGroup
+	stream *transport.Stream
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+	opts serverOptions
+
+	mu  sync.Mutex // guards following
+	lis map[net.Listener]bool
+	// conns contains all active server transports. It is a map keyed on a
+	// listener address with the value being the set of active transports
+	// belonging to that listener.
+	conns    map[string]map[transport.ServerTransport]bool
+	serve    bool
+	drain    bool
+	cv       *sync.Cond              // signaled when connections close for GracefulStop
+	services map[string]*serviceInfo // service name -> service info
+	events   trace.EventLog
+
+	quit               *grpcsync.Event
+	done               *grpcsync.Event
+	channelzRemoveOnce sync.Once
+	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
+
+	channelzID *channelz.Identifier
+	czData     *channelzData
+
+	serverWorkerChannels []chan *serverWorkerData
+}
+
+type serverOptions struct {
+	creds                 credentials.TransportCredentials
+	codec                 baseCodec
+	cp                    Compressor
+	dc                    Decompressor
+	unaryInt              UnaryServerInterceptor
+	streamInt             StreamServerInterceptor
+	chainUnaryInts        []UnaryServerInterceptor
+	chainStreamInts       []StreamServerInterceptor
+	binaryLogger          binarylog.Logger
+	inTapHandle           tap.ServerInHandle
+	statsHandlers         []stats.Handler
+	maxConcurrentStreams  uint32
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	unknownStreamDesc     *StreamDesc
+	keepaliveParams       keepalive.ServerParameters
+	keepalivePolicy       keepalive.EnforcementPolicy
+	initialWindowSize     int32
+	initialConnWindowSize int32
+	writeBufferSize       int
+	readBufferSize        int
+	connectionTimeout     time.Duration
+	maxHeaderListSize     *uint32
+	headerTableSize       *uint32
+	numServerWorkers      uint32
+}
+
+var defaultServerOptions = serverOptions{
+	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
+	maxSendMessageSize:    defaultServerMaxSendMessageSize,
+	connectionTimeout:     120 * time.Second,
+	writeBufferSize:       defaultWriteBufSize,
+	readBufferSize:        defaultReadBufSize,
+}
+var extraServerOptions []ServerOption
+
+// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
+type ServerOption interface {
+	apply(*serverOptions)
+}
+
+// EmptyServerOption does not alter the server configuration. It can be embedded
+// in another structure to build custom server options.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type EmptyServerOption struct{}
+
+func (EmptyServerOption) apply(*serverOptions) {}
+
+// funcServerOption wraps a function that modifies serverOptions into an
+// implementation of the ServerOption interface.
+type funcServerOption struct {
+	f func(*serverOptions)
+}
+
+func (fdo *funcServerOption) apply(do *serverOptions) {
+	fdo.f(do)
+}
+
+func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
+	return &funcServerOption{
+		f: f,
+	}
+}
+
+// joinServerOption provides a way to combine arbitrary number of server
+// options into one.
+type joinServerOption struct {
+	opts []ServerOption
+}
+
+func (mdo *joinServerOption) apply(do *serverOptions) {
+	for _, opt := range mdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinServerOption(opts ...ServerOption) ServerOption {
+	return &joinServerOption{opts: opts}
+}
+
+// WriteBufferSize determines how much data can be batched before doing a write
+// on the wire. The corresponding memory allocation for this buffer will be
+// twice the size to keep syscalls low. The default value for this buffer is
+// 32KB. Zero or negative values will disable the write buffer, so that each
+// write goes directly to the underlying connection.
+// Note: A Send call may not directly translate to a write.
+func WriteBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.writeBufferSize = s
+	})
+}
+
+// ReadBufferSize lets you set the size of the read buffer; this determines
+// how much data can be read at most in one read syscall. The default value
+// for this buffer is 32KB. Zero or negative values will disable the read
+// buffer for a connection, so the data framer can access the underlying conn directly.
+func ReadBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.readBufferSize = s
+	})
+}
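+
+// Illustrative sketch (the 64KB figure is arbitrary, not a recommendation):
+//
+//	srv := grpc.NewServer(
+//		grpc.WriteBufferSize(64*1024),
+//		grpc.ReadBufferSize(64*1024),
+//	)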
+
+// InitialWindowSize returns a ServerOption that sets the initial window size
+// for a stream. The lower bound for window size is 64K and any value smaller
+// than that will be ignored.
+func InitialWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialWindowSize = s
+	})
+}
+
+// InitialConnWindowSize returns a ServerOption that sets the initial window
+// size for a connection. The lower bound for window size is 64K and any value
+// smaller than that will be ignored.
+func InitialConnWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialConnWindowSize = s
+	})
+}
+
+// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
+func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+	if kp.Time > 0 && kp.Time < time.Second {
+		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = time.Second
+	}
+
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepaliveParams = kp
+	})
+}
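+
+// Illustrative sketch (durations are arbitrary): pinging idle clients every
+// 30s and closing the connection if the ping is not acknowledged within 10s:
+//
+//	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
+//		Time:    30 * time.Second,
+//		Timeout: 10 * time.Second,
+//	}))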
+
+// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
+func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepalivePolicy = kep
+	})
+}
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+func CustomCodec(codec Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codec
+	})
+}
+
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codec
+	})
+}
+
+// RPCCompressor returns a ServerOption that sets a compressor for outbound
+// messages.  For backward compatibility, all outbound messages will be sent
+// using this compressor, regardless of incoming message compression.  By
+// default, server messages will be sent using the same compressor with which
+// request messages were sent.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func RPCCompressor(cp Compressor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.cp = cp
+	})
+}
+
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
+// messages.  It has higher priority than decompressors registered via
+// encoding.RegisterCompressor.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func RPCDecompressor(dc Decompressor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.dc = dc
+	})
+}
+
+// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default limit.
+//
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
+func MaxMsgSize(m int) ServerOption {
+	return MaxRecvMsgSize(m)
+}
+
+// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default 4MB.
+func MaxRecvMsgSize(m int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxReceiveMessageSize = m
+	})
+}
+
+// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
+// If this is not set, gRPC uses the default `math.MaxInt32`.
+func MaxSendMsgSize(m int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxSendMessageSize = m
+	})
+}
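+
+// Illustrative sketch (the 8MB figure is arbitrary): raising both limits from
+// their defaults of 4MB for receiving and math.MaxInt32 for sending:
+//
+//	srv := grpc.NewServer(
+//		grpc.MaxRecvMsgSize(8*1024*1024),
+//		grpc.MaxSendMsgSize(8*1024*1024),
+//	)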
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxConcurrentStreams = n
+	})
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.TransportCredentials) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.creds = c
+	})
+}
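+
+// Illustrative sketch (certificate paths are hypothetical):
+//
+//	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	srv := grpc.NewServer(grpc.Creds(creds))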
+
+// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
+// server. Only one unary interceptor can be installed. The construction of multiple
+// interceptors (e.g., chaining) can be implemented at the caller.
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.unaryInt != nil {
+			panic("The unary server interceptor was already set and may not be reset.")
+		}
+		o.unaryInt = i
+	})
+}
+
+// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
+// for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All unary interceptors added by this method will be chained.
+func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
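+
+// Illustrative sketch (interceptor names are hypothetical): with
+//
+//	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(logging, auth, recovery))
+//
+// a unary call runs logging first (outermost), then auth, then recovery,
+// and finally the method handler itself.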
+
+// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
+// server. Only one stream interceptor can be installed.
+func StreamInterceptor(i StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.streamInt != nil {
+			panic("The stream server interceptor was already set and may not be reset.")
+		}
+		o.streamInt = i
+	})
+}
+
+// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
+// for streaming RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All stream interceptors added by this method will be chained.
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
+// InTapHandle returns a ServerOption that sets the tap handle for all server
+// transports to be created. Only one tap handle can be installed.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InTapHandle(h tap.ServerInHandle) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.inTapHandle != nil {
+			panic("The tap handle was already set and may not be reset.")
+		}
+		o.inTapHandle = h
+	})
+}
+
+// StatsHandler returns a ServerOption that sets the stats handler for the server.
+func StatsHandler(h stats.Handler) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.statsHandlers = append(o.statsHandlers, h)
+	})
+}
+
+// binaryLogger returns a ServerOption that can set the binary logger for the
+// server.
+func binaryLogger(bl binarylog.Logger) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.binaryLogger = bl
+	})
+}
+
+// UnknownServiceHandler returns a ServerOption that allows for adding a custom
+// unknown service handler. The provided method is a bidi-streaming RPC service
+// handler that will be invoked instead of returning the "unimplemented" gRPC
+// error whenever a request is received for an unregistered service or method.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.unknownStreamDesc = &StreamDesc{
+			StreamName: "unknown_service_handler",
+			Handler:    streamHandler,
+			// We need to assume that the users of the streamHandler will want to use both.
+			ClientStreams: true,
+			ServerStreams: true,
+		}
+	})
+}
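+
+// Illustrative sketch: a catch-all handler that rejects anything unknown with
+// a custom message (the handler body is hypothetical):
+//
+//	srv := grpc.NewServer(grpc.UnknownServiceHandler(
+//		func(srv interface{}, stream grpc.ServerStream) error {
+//			return status.Error(codes.Unimplemented, "method not served here")
+//		}))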
+
+// ConnectionTimeout returns a ServerOption that sets the timeout for
+// connection establishment (up to and including HTTP/2 handshaking) for all
+// new connections.  If this is not set, the default is 120 seconds.  A zero or
+// negative value will result in an immediate timeout.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ConnectionTimeout(d time.Duration) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.connectionTimeout = d
+	})
+}
+
+// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
+// of header list that the server is prepared to accept.
+func MaxHeaderListSize(s uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxHeaderListSize = &s
+	})
+}
+
+// HeaderTableSize returns a ServerOption that sets the size of the dynamic
+// header table for a stream.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func HeaderTableSize(s uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.headerTableSize = &s
+	})
+}
+
+// NumStreamWorkers returns a ServerOption that sets the number of worker
+// goroutines that should be used to process incoming streams. Setting this to
+// zero (default) will disable workers and spawn a new goroutine for each
+// stream.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NumStreamWorkers(numServerWorkers uint32) ServerOption {
+	// TODO: If/when this API gets stabilized (i.e. stream workers become the
+	// only way streams are processed), change the behavior of the zero value to
+	// a sane default. Preliminary experiments suggest that a value equal to the
+	// number of CPUs available is most performant; requires thorough testing.
+	return newFuncServerOption(func(o *serverOptions) {
+		o.numServerWorkers = numServerWorkers
+	})
+}
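+
+// Illustrative sketch, following the CPU-count hint in the TODO above (the
+// value is an experiment, not a guarantee):
+//
+//	srv := grpc.NewServer(grpc.NumStreamWorkers(uint32(runtime.NumCPU())))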
+
+// serverWorkerResetThreshold defines how often a worker's stack must be reset:
+// every N requests, a worker resets its stack by spawning a new goroutine in
+// its place, so that large stacks don't live in memory forever. 2^16 should
+// allow each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+// serverWorker blocks on a *serverWorkerData channel forever and waits for
+// data to be fed by serveStreams. This allows multiple requests to be
+// processed by the same goroutine, removing the need for expensive stack
+// re-allocations (see the runtime.morestack problem [1]).
+//
+// [1] https://github.com/golang/go/issues/18138
+func (s *Server) serverWorker(ch chan *serverWorkerData) {
+	// To make sure all server workers don't reset at the same time, choose a
+	// random number of iterations before resetting.
+	threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
+	for completed := 0; completed < threshold; completed++ {
+		data, ok := <-ch
+		if !ok {
+			return
+		}
+		s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
+		data.wg.Done()
+	}
+	go s.serverWorker(ch)
+}
+
+// initServerWorkers creates worker goroutines and channels that process
+// incoming streams, reducing the overall time spent on runtime.morestack.
+func (s *Server) initServerWorkers() {
+	s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
+	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+		s.serverWorkerChannels[i] = make(chan *serverWorkerData)
+		go s.serverWorker(s.serverWorkerChannels[i])
+	}
+}
+
+func (s *Server) stopServerWorkers() {
+	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+		close(s.serverWorkerChannels[i])
+	}
+}
+
+// NewServer creates a gRPC server which has no service registered and has not
+// started to accept requests yet.
+func NewServer(opt ...ServerOption) *Server {
+	opts := defaultServerOptions
+	for _, o := range extraServerOptions {
+		o.apply(&opts)
+	}
+	for _, o := range opt {
+		o.apply(&opts)
+	}
+	s := &Server{
+		lis:      make(map[net.Listener]bool),
+		opts:     opts,
+		conns:    make(map[string]map[transport.ServerTransport]bool),
+		services: make(map[string]*serviceInfo),
+		quit:     grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+		czData:   new(channelzData),
+	}
+	chainUnaryServerInterceptors(s)
+	chainStreamServerInterceptors(s)
+	s.cv = sync.NewCond(&s.mu)
+	if EnableTracing {
+		_, file, line, _ := runtime.Caller(1)
+		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+	}
+
+	if s.opts.numServerWorkers > 0 {
+		s.initServerWorkers()
+	}
+
+	s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
+	channelz.Info(logger, s.channelzID, "Server created")
+	return s
+}
+
+// printf records an event in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) printf(format string, a ...interface{}) {
+	if s.events != nil {
+		s.events.Printf(format, a...)
+	}
+}
+
+// errorf records an error in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) errorf(format string, a ...interface{}) {
+	if s.events != nil {
+		s.events.Errorf(format, a...)
+	}
+}
+
+// ServiceRegistrar wraps a single method that supports service registration. It
+// enables users to pass concrete types other than grpc.Server to the service
+// registration methods exported by the IDL generated code.
+type ServiceRegistrar interface {
+	// RegisterService registers a service and its implementation to the
+	// concrete type implementing this interface.  It may not be called
+	// once the server has started serving.
+	// desc describes the service and its methods and handlers. impl is the
+	// service implementation which is passed to the method handlers.
+	RegisterService(desc *ServiceDesc, impl interface{})
+}
+
+// RegisterService registers a service and its implementation to the gRPC
+// server. It is called from the IDL generated code. This must be called before
+// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
+// ensure it implements sd.HandlerType.
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+	if ss != nil {
+		ht := reflect.TypeOf(sd.HandlerType).Elem()
+		st := reflect.TypeOf(ss)
+		if !st.Implements(ht) {
+			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+		}
+	}
+	s.register(sd, ss)
+}
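+
+// Illustrative sketch (pb and Greeter are hypothetical generated code):
+// RegisterService is normally reached through the IDL-generated helper rather
+// than called directly:
+//
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeterImpl{}) // wraps s.RegisterService(...)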
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.printf("RegisterService(%q)", sd.ServiceName)
+	if s.serve {
+		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+	}
+	if _, ok := s.services[sd.ServiceName]; ok {
+		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	}
+	info := &serviceInfo{
+		serviceImpl: ss,
+		methods:     make(map[string]*MethodDesc),
+		streams:     make(map[string]*StreamDesc),
+		mdata:       sd.Metadata,
+	}
+	for i := range sd.Methods {
+		d := &sd.Methods[i]
+		info.methods[d.MethodName] = d
+	}
+	for i := range sd.Streams {
+		d := &sd.Streams[i]
+		info.streams[d.StreamName] = d
+	}
+	s.services[sd.ServiceName] = info
+}
+
+// MethodInfo contains the information of an RPC including its method name and type.
+type MethodInfo struct {
+	// Name is the method name only, without the service name or package name.
+	Name string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
+type ServiceInfo struct {
+	Methods []MethodInfo
+	// Metadata is the metadata specified in ServiceDesc when registering service.
+	Metadata interface{}
+}
+
+// GetServiceInfo returns a map from service names to ServiceInfo.
+// Service names include the package names, in the form of <package>.<service>.
+func (s *Server) GetServiceInfo() map[string]ServiceInfo {
+	ret := make(map[string]ServiceInfo)
+	for n, srv := range s.services {
+		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+		for m := range srv.methods {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: false,
+				IsServerStream: false,
+			})
+		}
+		for m, d := range srv.streams {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: d.ClientStreams,
+				IsServerStream: d.ServerStreams,
+			})
+		}
+
+		ret[n] = ServiceInfo{
+			Methods:  methods,
+			Metadata: srv.mdata,
+		}
+	}
+	return ret
+}
+
+// ErrServerStopped indicates that the operation is now illegal because the
+// server has been stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
+
+type listenSocket struct {
+	net.Listener
+	channelzID *channelz.Identifier
+}
+
+func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
+	return &channelz.SocketInternalMetric{
+		SocketOptions: channelz.GetSocketOption(l.Listener),
+		LocalAddr:     l.Listener.Addr(),
+	}
+}
+
+func (l *listenSocket) Close() error {
+	err := l.Listener.Close()
+	channelz.RemoveEntry(l.channelzID)
+	channelz.Info(logger, l.channelzID, "ListenSocket deleted")
+	return err
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
+// this method returns.
+// Serve will return a non-nil error unless Stop or GracefulStop is called.
+func (s *Server) Serve(lis net.Listener) error {
+	s.mu.Lock()
+	s.printf("serving")
+	s.serve = true
+	if s.lis == nil {
+		// Serve called after Stop or GracefulStop.
+		s.mu.Unlock()
+		lis.Close()
+		return ErrServerStopped
+	}
+
+	s.serveWG.Add(1)
+	defer func() {
+		s.serveWG.Done()
+		if s.quit.HasFired() {
+			// Stop or GracefulStop called; block until done and return nil.
+			<-s.done.Done()
+		}
+	}()
+
+	ls := &listenSocket{Listener: lis}
+	s.lis[ls] = true
+
+	defer func() {
+		s.mu.Lock()
+		if s.lis != nil && s.lis[ls] {
+			ls.Close()
+			delete(s.lis, ls)
+		}
+		s.mu.Unlock()
+	}()
+
+	var err error
+	ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
+	if err != nil {
+		s.mu.Unlock()
+		return err
+	}
+	s.mu.Unlock()
+	channelz.Info(logger, ls.channelzID, "ListenSocket created")
+
+	var tempDelay time.Duration // how long to sleep on accept failure
+	for {
+		rawConn, err := lis.Accept()
+		if err != nil {
+			if ne, ok := err.(interface {
+				Temporary() bool
+			}); ok && ne.Temporary() {
+				if tempDelay == 0 {
+					tempDelay = 5 * time.Millisecond
+				} else {
+					tempDelay *= 2
+				}
+				if max := 1 * time.Second; tempDelay > max {
+					tempDelay = max
+				}
+				s.mu.Lock()
+				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
+				s.mu.Unlock()
+				timer := time.NewTimer(tempDelay)
+				select {
+				case <-timer.C:
+				case <-s.quit.Done():
+					timer.Stop()
+					return nil
+				}
+				continue
+			}
+			s.mu.Lock()
+			s.printf("done serving; Accept = %v", err)
+			s.mu.Unlock()
+
+			if s.quit.HasFired() {
+				return nil
+			}
+			return err
+		}
+		tempDelay = 0
+		// Start a new goroutine to deal with rawConn so we don't stall this Accept
+		// loop goroutine.
+		//
+		// Make sure we account for the goroutine so GracefulStop doesn't nil out
+		// s.conns before this conn can be added.
+		s.serveWG.Add(1)
+		go func() {
+			s.handleRawConn(lis.Addr().String(), rawConn)
+			s.serveWG.Done()
+		}()
+	}
+}
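+
+// Illustrative sketch (the address is arbitrary): the typical way Serve is
+// driven from main:
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := s.Serve(lis); err != nil {
+//		log.Fatal(err)
+//	}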
+
+// handleRawConn forks a goroutine to handle a just-accepted connection that
+// has not had any I/O performed on it yet.
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
+	if s.quit.HasFired() {
+		rawConn.Close()
+		return
+	}
+	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
+
+	// Finish handshaking (HTTP2)
+	st := s.newHTTP2Transport(rawConn)
+	rawConn.SetDeadline(time.Time{})
+	if st == nil {
+		return
+	}
+
+	if !s.addConn(lisAddr, st) {
+		return
+	}
+	go func() {
+		s.serveStreams(st)
+		s.removeConn(lisAddr, st)
+	}()
+}
+
+func (s *Server) drainServerTransports(addr string) {
+	s.mu.Lock()
+	conns := s.conns[addr]
+	for st := range conns {
+		st.Drain()
+	}
+	s.mu.Unlock()
+}
+
+// newHTTP2Transport sets up an HTTP/2 transport (using the
+// gRPC http2 server transport in transport/http2_server.go).
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
+	config := &transport.ServerConfig{
+		MaxStreams:            s.opts.maxConcurrentStreams,
+		ConnectionTimeout:     s.opts.connectionTimeout,
+		Credentials:           s.opts.creds,
+		InTapHandle:           s.opts.inTapHandle,
+		StatsHandlers:         s.opts.statsHandlers,
+		KeepaliveParams:       s.opts.keepaliveParams,
+		KeepalivePolicy:       s.opts.keepalivePolicy,
+		InitialWindowSize:     s.opts.initialWindowSize,
+		InitialConnWindowSize: s.opts.initialConnWindowSize,
+		WriteBufferSize:       s.opts.writeBufferSize,
+		ReadBufferSize:        s.opts.readBufferSize,
+		ChannelzParentID:      s.channelzID,
+		MaxHeaderListSize:     s.opts.maxHeaderListSize,
+		HeaderTableSize:       s.opts.headerTableSize,
+	}
+	st, err := transport.NewServerTransport(c, config)
+	if err != nil {
+		s.mu.Lock()
+		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
+		s.mu.Unlock()
+		// ErrConnDispatched means that the connection was dispatched away from
+		// gRPC; those connections should be left open.
+		if err != credentials.ErrConnDispatched {
+			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
+			if err != io.EOF {
+				channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
+			}
+			c.Close()
+		}
+		return nil
+	}
+
+	return st
+}
+
+func (s *Server) serveStreams(st transport.ServerTransport) {
+	defer st.Close(errors.New("finished serving streams for the server transport"))
+	var wg sync.WaitGroup
+
+	var roundRobinCounter uint32
+	st.HandleStreams(func(stream *transport.Stream) {
+		wg.Add(1)
+		if s.opts.numServerWorkers > 0 {
+			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
+			select {
+			case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
+			default:
+				// If all stream workers are busy, fall back to the default code path.
+				go func() {
+					s.handleStream(st, stream, s.traceInfo(st, stream))
+					wg.Done()
+				}()
+			}
+		} else {
+			go func() {
+				defer wg.Done()
+				s.handleStream(st, stream, s.traceInfo(st, stream))
+			}()
+		}
+	}, func(ctx context.Context, method string) context.Context {
+		if !EnableTracing {
+			return ctx
+		}
+		tr := trace.New("grpc.Recv."+methodFamily(method), method)
+		return trace.NewContext(ctx, tr)
+	})
+	wg.Wait()
+}
+
+var _ http.Handler = (*Server)(nil)
+
+// ServeHTTP implements the Go standard library's http.Handler
+// interface by responding to the gRPC request r, looking up
+// the requested gRPC method in the gRPC server s.
+//
+// The provided HTTP request must have arrived on an HTTP/2
+// connection. When using the Go standard library's server,
+// practically this means that the Request must also have arrived
+// over TLS.
+//
+// To share one port (such as 443 for https) between gRPC and an
+// existing http.Handler, use a root http.Handler such as:
+//
+//	if r.ProtoMajor == 2 && strings.HasPrefix(
+//		r.Header.Get("Content-Type"), "application/grpc") {
+//		grpcServer.ServeHTTP(w, r)
+//	} else {
+//		yourMux.ServeHTTP(w, r)
+//	}
+//
+// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
+// separate from grpc-go's HTTP/2 server. Performance and features may vary
+// between the two paths. ServeHTTP does not support some gRPC features
+// available through grpc-go's HTTP/2 server.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+	if err != nil {
+		// Errors returned from transport.NewServerHandlerTransport have
+		// already been written to w.
+		return
+	}
+	if !s.addConn(listenerAddressForServeHTTP, st) {
+		return
+	}
+	defer s.removeConn(listenerAddressForServeHTTP, st)
+	s.serveStreams(st)
+}
+
+// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
+// If tracing is not enabled, it returns nil.
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
+	if !EnableTracing {
+		return nil
+	}
+	tr, ok := trace.FromContext(stream.Context())
+	if !ok {
+		return nil
+	}
+
+	trInfo = &traceInfo{
+		tr: tr,
+		firstLine: firstLine{
+			client:     false,
+			remoteAddr: st.RemoteAddr(),
+		},
+	}
+	if dl, ok := stream.Context().Deadline(); ok {
+		trInfo.firstLine.deadline = time.Until(dl)
+	}
+	return trInfo
+}
+
+func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conns == nil {
+		st.Close(errors.New("Server.addConn called when server has already been stopped"))
+		return false
+	}
+	if s.drain {
+		// Transport added after we drained our existing conns: drain it
+		// immediately.
+		st.Drain()
+	}
+
+	if s.conns[addr] == nil {
+		// Create a map entry if this is the first connection on this listener.
+		s.conns[addr] = make(map[transport.ServerTransport]bool)
+	}
+	s.conns[addr][st] = true
+	return true
+}
+
+func (s *Server) removeConn(addr string, st transport.ServerTransport) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	conns := s.conns[addr]
+	if conns != nil {
+		delete(conns, st)
+		if len(conns) == 0 {
+			// If the last connection for this address is being removed, also
+			// remove the map entry corresponding to the address. This is used
+			// in GracefulStop() when waiting for all connections to be closed.
+			delete(s.conns, addr)
+		}
+		s.cv.Broadcast()
+	}
+}
+
+func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
+	return &channelz.ServerInternalMetric{
+		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
+		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
+		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
+		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
+	}
+}
+
+func (s *Server) incrCallsStarted() {
+	atomic.AddInt64(&s.czData.callsStarted, 1)
+	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (s *Server) incrCallsSucceeded() {
+	atomic.AddInt64(&s.czData.callsSucceeded, 1)
+}
+
+func (s *Server) incrCallsFailed() {
+	atomic.AddInt64(&s.czData.callsFailed, 1)
+}
+
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
+	if err != nil {
+		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
+		return err
+	}
+	compData, err := compress(data, cp, comp)
+	if err != nil {
+		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
+		return err
+	}
+	hdr, payload := msgHeader(data, compData)
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > s.opts.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+	}
+	err = t.Write(stream, hdr, payload, opts)
+	if err == nil {
+		for _, sh := range s.opts.statsHandlers {
+			sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+		}
+	}
+	return err
+}
+
+// chainUnaryServerInterceptors chains all unary server interceptors into one.
+func chainUnaryServerInterceptors(s *Server) {
+	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainUnaryInts
+	if s.opts.unaryInt != nil {
+		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
+	}
+
+	var chainedInt UnaryServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = chainUnaryInterceptors(interceptors)
+	}
+
+	s.opts.unaryInt = chainedInt
+}
+
+func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
+	return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
+	}
+}
+
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+	return func(ctx context.Context, req interface{}) (interface{}, error) {
+		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
+
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+	shs := s.opts.statsHandlers
+	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+		if channelz.IsOn() {
+			s.incrCallsStarted()
+		}
+		var statsBegin *stats.Begin
+		for _, sh := range shs {
+			beginTime := time.Now()
+			statsBegin = &stats.Begin{
+				BeginTime:      beginTime,
+				IsClientStream: false,
+				IsServerStream: false,
+			}
+			sh.HandleRPC(stream.Context(), statsBegin)
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		}
+		// The deferred error handling for tracing, stats handler and channelz are
+		// combined into one function to reduce stack usage -- a defer takes ~56-64
+		// bytes on the stack, so overflowing the stack will require a stack
+		// re-allocation, which is expensive.
+		//
+		// To maintain behavior similar to separate deferred statements, statements
+		// should be executed in the reverse order. That is, tracing first, stats
+		// handler second, and channelz last. Note that panics *within* defers will
+		// lead to different behavior, but that's an acceptable compromise; that
+		// would be undefined behavior territory anyway.
+		defer func() {
+			if trInfo != nil {
+				if err != nil && err != io.EOF {
+					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					trInfo.tr.SetError()
+				}
+				trInfo.tr.Finish()
+			}
+
+			for _, sh := range shs {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(stream.Context(), end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+	var binlogs []binarylog.MethodLogger
+	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+		binlogs = append(binlogs, ml)
+	}
+	if s.opts.binaryLogger != nil {
+		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+			binlogs = append(binlogs, ml)
+		}
+	}
+	if len(binlogs) != 0 {
+		ctx := stream.Context()
+		md, _ := metadata.FromIncomingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			Header:     md,
+			MethodName: stream.Method(),
+			PeerAddr:   nil,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		if a := md[":authority"]; len(a) > 0 {
+			logEntry.Authority = a[0]
+		}
+		if peer, ok := peer.FromContext(ctx); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(logEntry)
+		}
+	}
+
+	// comp and cp are used for compression.  decomp and dc are used for
+	// decompression.  If comp and decomp are both set, they are the same;
+	// however they are kept separate to ensure that at most one of the
+	// compressor/decompressor variable pairs is set for use later.
+	var comp, decomp encoding.Compressor
+	var cp Compressor
+	var dc Decompressor
+
+	// If dc is set and matches the stream's compression, use it.  Otherwise, try
+	// to find a matching registered compressor for decomp.
+	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+		dc = s.opts.dc
+	} else if rc != "" && rc != encoding.Identity {
+		decomp = encoding.GetCompressor(rc)
+		if decomp == nil {
+			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+			t.WriteStatus(stream, st)
+			return st.Err()
+		}
+	}
+
+	// If cp is set, use it.  Otherwise, attempt to compress the response using
+	// the incoming message compression method.
+	//
+	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+	if s.opts.cp != nil {
+		cp = s.opts.cp
+		stream.SetSendCompress(cp.Type())
+	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+		// Legacy compressor not specified; attempt to respond with same encoding.
+		comp = encoding.GetCompressor(rc)
+		if comp != nil {
+			stream.SetSendCompress(rc)
+		}
+	}
+
+	var payInfo *payloadInfo
+	if len(shs) != 0 || len(binlogs) != 0 {
+		payInfo = &payloadInfo{}
+	}
+	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+	if err != nil {
+		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+		}
+		return err
+	}
+	if channelz.IsOn() {
+		t.IncrMsgRecv()
+	}
+	df := func(v interface{}) error {
+		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
+			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
+		}
+		for _, sh := range shs {
+			sh.HandleRPC(stream.Context(), &stats.InPayload{
+				RecvTime:   time.Now(),
+				Payload:    v,
+				WireLength: payInfo.wireLength + headerLen,
+				Data:       d,
+				Length:     len(d),
+			})
+		}
+		if len(binlogs) != 0 {
+			cm := &binarylog.ClientMessage{
+				Message: d,
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(cm)
+			}
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
+		}
+		return nil
+	}
+	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
+	if appErr != nil {
+		appStatus, ok := status.FromError(appErr)
+		if !ok {
+			// Convert non-status application error to a status error with code
+			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr)
+			appErr = appStatus.Err()
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+			trInfo.tr.SetError()
+		}
+		if e := t.WriteStatus(stream, appStatus); e != nil {
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+		}
+		if len(binlogs) != 0 {
+			if h, _ := stream.Header(); h.Len() > 0 {
+				// Only log serverHeader if there was a header. Otherwise it
+				// can be trailer-only.
+				sh := &binarylog.ServerHeader{
+					Header: h,
+				}
+				for _, binlog := range binlogs {
+					binlog.Log(sh)
+				}
+			}
+			st := &binarylog.ServerTrailer{
+				Trailer: stream.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(st)
+			}
+		}
+		return appErr
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(stringer("OK"), false)
+	}
+	opts := &transport.Options{Last: true}
+
+	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+		if err == io.EOF {
+			// The entire stream is done (for unary RPC only).
+			return err
+		}
+		if sts, ok := status.FromError(err); ok {
+			if e := t.WriteStatus(stream, sts); e != nil {
+				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+			}
+		} else {
+			switch st := err.(type) {
+			case transport.ConnectionError:
+				// Nothing to do here.
+			default:
+				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
+			}
+		}
+		if len(binlogs) != 0 {
+			h, _ := stream.Header()
+			sh := &binarylog.ServerHeader{
+				Header: h,
+			}
+			st := &binarylog.ServerTrailer{
+				Trailer: stream.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(sh)
+				binlog.Log(st)
+			}
+		}
+		return err
+	}
+	if len(binlogs) != 0 {
+		h, _ := stream.Header()
+		sh := &binarylog.ServerHeader{
+			Header: h,
+		}
+		sm := &binarylog.ServerMessage{
+			Message: reply,
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(sh)
+			binlog.Log(sm)
+		}
+	}
+	if channelz.IsOn() {
+		t.IncrMsgSent()
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
+	}
+	// TODO: Should we be logging if writing status failed here, like above?
+	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
+	// error or allow the stats handler to see it?
+	err = t.WriteStatus(stream, statusOK)
+	if len(binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
+			Trailer: stream.Trailer(),
+			Err:     appErr,
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(st)
+		}
+	}
+	return err
+}
+
+// chainStreamServerInterceptors chains all stream server interceptors into one.
+func chainStreamServerInterceptors(s *Server) {
+	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainStreamInts
+	if s.opts.streamInt != nil {
+		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
+	}
+
+	var chainedInt StreamServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = chainStreamInterceptors(interceptors)
+	}
+
+	s.opts.streamInt = chainedInt
+}
+
+func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
+	return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
+	}
+}
+
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+	return func(srv interface{}, stream ServerStream) error {
+		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
+
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+	if channelz.IsOn() {
+		s.incrCallsStarted()
+	}
+	shs := s.opts.statsHandlers
+	var statsBegin *stats.Begin
+	if len(shs) != 0 {
+		beginTime := time.Now()
+		statsBegin = &stats.Begin{
+			BeginTime:      beginTime,
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
+		}
+		for _, sh := range shs {
+			sh.HandleRPC(stream.Context(), statsBegin)
+		}
+	}
+	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	ss := &serverStream{
+		ctx:                   ctx,
+		t:                     t,
+		s:                     stream,
+		p:                     &parser{r: stream},
+		codec:                 s.getCodec(stream.ContentSubtype()),
+		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
+		maxSendMessageSize:    s.opts.maxSendMessageSize,
+		trInfo:                trInfo,
+		statsHandler:          shs,
+	}
+
+	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+		// See comment in processUnaryRPC on defers.
+		defer func() {
+			if trInfo != nil {
+				ss.mu.Lock()
+				if err != nil && err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+				ss.trInfo.tr.Finish()
+				ss.trInfo.tr = nil
+				ss.mu.Unlock()
+			}
+
+			if len(shs) != 0 {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				for _, sh := range shs {
+					sh.HandleRPC(stream.Context(), end)
+				}
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+
+	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+		ss.binlogs = append(ss.binlogs, ml)
+	}
+	if s.opts.binaryLogger != nil {
+		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+			ss.binlogs = append(ss.binlogs, ml)
+		}
+	}
+	if len(ss.binlogs) != 0 {
+		md, _ := metadata.FromIncomingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			Header:     md,
+			MethodName: stream.Method(),
+			PeerAddr:   nil,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		if a := md[":authority"]; len(a) > 0 {
+			logEntry.Authority = a[0]
+		}
+		if peer, ok := peer.FromContext(ss.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(logEntry)
+		}
+	}
+
+	// If dc is set and matches the stream's compression, use it.  Otherwise, try
+	// to find a matching registered compressor for decomp.
+	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+		ss.dc = s.opts.dc
+	} else if rc != "" && rc != encoding.Identity {
+		ss.decomp = encoding.GetCompressor(rc)
+		if ss.decomp == nil {
+			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+			t.WriteStatus(ss.s, st)
+			return st.Err()
+		}
+	}
+
+	// If cp is set, use it.  Otherwise, attempt to compress the response using
+	// the incoming message compression method.
+	//
+	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+	if s.opts.cp != nil {
+		ss.cp = s.opts.cp
+		stream.SetSendCompress(s.opts.cp.Type())
+	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+		// Legacy compressor not specified; attempt to respond with same encoding.
+		ss.comp = encoding.GetCompressor(rc)
+		if ss.comp != nil {
+			stream.SetSendCompress(rc)
+		}
+	}
+
+	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+	}
+	var appErr error
+	var server interface{}
+	if info != nil {
+		server = info.serviceImpl
+	}
+	if s.opts.streamInt == nil {
+		appErr = sd.Handler(server, ss)
+	} else {
+		info := &StreamServerInfo{
+			FullMethod:     stream.Method(),
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
+		}
+		appErr = s.opts.streamInt(server, ss, info, sd.Handler)
+	}
+	if appErr != nil {
+		appStatus, ok := status.FromError(appErr)
+		if !ok {
+			// Convert non-status application error to a status error with code
+			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr)
+			appErr = appStatus.Err()
+		}
+		if trInfo != nil {
+			ss.mu.Lock()
+			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+			ss.trInfo.tr.SetError()
+			ss.mu.Unlock()
+		}
+		t.WriteStatus(ss.s, appStatus)
+		if len(ss.binlogs) != 0 {
+			st := &binarylog.ServerTrailer{
+				Trailer: ss.s.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range ss.binlogs {
+				binlog.Log(st)
+			}
+		}
+		// TODO: Should we log an error from WriteStatus here and below?
+		return appErr
+	}
+	if trInfo != nil {
+		ss.mu.Lock()
+		ss.trInfo.tr.LazyLog(stringer("OK"), false)
+		ss.mu.Unlock()
+	}
+	err = t.WriteStatus(ss.s, statusOK)
+	if len(ss.binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
+			Trailer: ss.s.Trailer(),
+			Err:     appErr,
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(st)
+		}
+	}
+	return err
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+	sm := stream.Method()
+	if sm != "" && sm[0] == '/' {
+		sm = sm[1:]
+	}
+	pos := strings.LastIndex(sm, "/")
+	if pos == -1 {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+			trInfo.tr.SetError()
+		}
+		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+			if trInfo != nil {
+				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+				trInfo.tr.SetError()
+			}
+			channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+		}
+		if trInfo != nil {
+			trInfo.tr.Finish()
+		}
+		return
+	}
+	service := sm[:pos]
+	method := sm[pos+1:]
+
+	srv, knownService := s.services[service]
+	if knownService {
+		if md, ok := srv.methods[method]; ok {
+			s.processUnaryRPC(t, stream, srv, md, trInfo)
+			return
+		}
+		if sd, ok := srv.streams[method]; ok {
+			s.processStreamingRPC(t, stream, srv, sd, trInfo)
+			return
+		}
+	}
+	// Unknown service, or known service but unknown method.
+	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+		return
+	}
+	var errDesc string
+	if !knownService {
+		errDesc = fmt.Sprintf("unknown service %v", service)
+	} else {
+		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyPrintf("%s", errDesc)
+		trInfo.tr.SetError()
+	}
+	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			trInfo.tr.SetError()
+		}
+		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+	}
+	if trInfo != nil {
+		trInfo.tr.Finish()
+	}
+}
+
+// The key to save ServerTransportStream in the context.
+type streamKey struct{}
+
+// NewContextWithServerTransportStream creates a new context from ctx and
+// attaches stream to it.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
+	return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// ServerTransportStream is a minimal interface that a transport stream must
+// implement. This can be used to mock an actual transport stream for tests of
+// handler code that use, for example, grpc.SetHeader (which requires some
+// stream to be in context).
+//
+// See also NewContextWithServerTransportStream.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServerTransportStream interface {
+	Method() string
+	SetHeader(md metadata.MD) error
+	SendHeader(md metadata.MD) error
+	SetTrailer(md metadata.MD) error
+}
+
+// ServerTransportStreamFromContext returns the ServerTransportStream saved in
+// ctx. Returns nil if the given context has no stream associated with it
+// (which implies it is not an RPC invocation context).
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
+	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
+	return s
+}
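As the doc comments note, this pair of APIs exists partly so handler code can be exercised without a real transport. A hedged sketch under that assumption; `fakeStream`, the method path, and the metadata key are hypothetical:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// fakeStream is a hypothetical in-memory ServerTransportStream for tests.
type fakeStream struct {
	header, trailer metadata.MD
}

func (f *fakeStream) Method() string { return "/test.Service/Method" }

func (f *fakeStream) SetHeader(md metadata.MD) error {
	f.header = metadata.Join(f.header, md)
	return nil
}

func (f *fakeStream) SendHeader(md metadata.MD) error { return f.SetHeader(md) }

func (f *fakeStream) SetTrailer(md metadata.MD) error {
	f.trailer = metadata.Join(f.trailer, md)
	return nil
}

func main() {
	fs := &fakeStream{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), fs)

	// Handler code under test can call grpc.SetHeader without a real transport.
	_ = grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "42"))
	fmt.Println(fs.header) // map[x-request-id:[42]]
}
```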
+
+// Stop stops the gRPC server. It immediately closes all open
+// connections and listeners.
+// It cancels all active RPCs on the server side and the corresponding
+// pending RPCs on the client side will get notified by connection
+// errors.
+func (s *Server) Stop() {
+	s.quit.Fire()
+
+	defer func() {
+		s.serveWG.Wait()
+		s.done.Fire()
+	}()
+
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
+
+	s.mu.Lock()
+	listeners := s.lis
+	s.lis = nil
+	conns := s.conns
+	s.conns = nil
+	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
+	s.cv.Broadcast()
+	s.mu.Unlock()
+
+	for lis := range listeners {
+		lis.Close()
+	}
+	for _, cs := range conns {
+		for st := range cs {
+			st.Close(errors.New("Server.Stop called"))
+		}
+	}
+	if s.opts.numServerWorkers > 0 {
+		s.stopServerWorkers()
+	}
+
+	s.mu.Lock()
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+	s.mu.Unlock()
+}
+
+// GracefulStop stops the gRPC server gracefully. It stops the server from
+// accepting new connections and RPCs and blocks until all the pending RPCs are
+// finished.
+func (s *Server) GracefulStop() {
+	s.quit.Fire()
+	defer s.done.Fire()
+
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
+	s.mu.Lock()
+	if s.conns == nil {
+		s.mu.Unlock()
+		return
+	}
+
+	for lis := range s.lis {
+		lis.Close()
+	}
+	s.lis = nil
+	if !s.drain {
+		for _, conns := range s.conns {
+			for st := range conns {
+				st.Drain()
+			}
+		}
+		s.drain = true
+	}
+
+	// Wait for serving threads to be ready to exit.  Only then can we be sure no
+	// new conns will be created.
+	s.mu.Unlock()
+	s.serveWG.Wait()
+	s.mu.Lock()
+
+	for len(s.conns) != 0 {
+		s.cv.Wait()
+	}
+	s.conns = nil
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+	s.mu.Unlock()
+}
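A common shutdown pattern built on the two methods above: serve until a termination signal arrives, then drain in-flight RPCs with GracefulStop. The listen address and signal set are illustrative:

```go
package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"syscall"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()

	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
		<-sig
		// Stops accepting new connections and RPCs, then blocks until
		// all pending RPCs have finished.
		s.GracefulStop()
	}()

	// Serve returns once GracefulStop (or Stop) has been called.
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```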
+
+// getCodec returns the codec to use for the given content subtype, falling
+// back to the proto codec. contentSubtype must be lowercase; the return
+// value is never nil.
+func (s *Server) getCodec(contentSubtype string) baseCodec {
+	if s.opts.codec != nil {
+		return s.opts.codec
+	}
+	if contentSubtype == "" {
+		return encoding.GetCodec(proto.Name)
+	}
+	codec := encoding.GetCodec(contentSubtype)
+	if codec == nil {
+		return encoding.GetCodec(proto.Name)
+	}
+	return codec
+}
+
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged.  All
+// the metadata will be sent out when one of the following happens:
+//
+//   - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+//   - The first response message is sent.  For unary handlers, this occurs when
+//     the handler returns; for streaming handlers, this can happen when stream's
+//     SendMsg method is called.
+//   - An RPC status is sent out (error or success).  This occurs when the handler
+//     returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetHeader(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetHeader(md)
+}
+
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list).  The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	if err := stream.SendHeader(md); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetTrailer(md)
+}
+
+// Method returns the method string for the server context.  The returned
+// string is in the format of "/service/method".
+func Method(ctx context.Context) (string, bool) {
+	s := ServerTransportStreamFromContext(ctx)
+	if s == nil {
+		return "", false
+	}
+	return s.Method(), true
+}
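Taken together, these helpers let a unary handler attach metadata and inspect its own method without touching the transport. A minimal sketch; the metadata keys and method name in the comment are made up for illustration:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// handle is a hypothetical unary handler body exercising the helpers above.
func handle(ctx context.Context) error {
	if m, ok := grpc.Method(ctx); ok {
		log.Printf("handling %s", m) // e.g. "/pkg.Service/DoThing"
	}
	// Merged into the header metadata, which is flushed before the first
	// response message or the RPC status, whichever comes first.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-served-by", "node-1")); err != nil {
		return err
	}
	// Buffered and sent together with the final RPC status.
	return grpc.SetTrailer(ctx, metadata.Pairs("x-handler", "demo"))
}

func main() { _ = handle }
```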
+
+type channelzServer struct {
+	s *Server
+}
+
+func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
+	return c.s.channelzMetric()
+}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
new file mode 100644
index 000000000..f22acace4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -0,0 +1,406 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig = internalserviceconfig.MethodConfig
+
+type lbConfig struct {
+	name string
+	cfg  serviceconfig.LoadBalancingConfig
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+	serviceconfig.Config
+
+	// LB is the load balancer the service provider recommends.  This is
+	// deprecated; lbConfig is preferred.  If lbConfig and LB are both present,
+	// lbConfig will be used.
+	LB *string
+
+	// lbConfig is the service config's load balancing configuration.  If
+	// lbConfig and LB are both present, lbConfig will be used.
+	lbConfig *lbConfig
+
+	// Methods contains a map for the methods in this service.  If there is an
+	// exact match for a method (i.e. /service/method) in the map, use the
+	// corresponding MethodConfig.  If there's no exact match, look for the
+	// default config for the service (/service/) and use the corresponding
+	// MethodConfig if it exists.  Otherwise, the method has no MethodConfig to
+	// use.
+	Methods map[string]MethodConfig
+
+	// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+	// retry attempts and hedged RPCs when the client’s ratio of failures to
+	// successes exceeds a threshold.
+	//
+	// For each server name, the gRPC client will maintain a token_count which is
+	// initially set to maxTokens, and can take values between 0 and maxTokens.
+	//
+	// Every outgoing RPC (regardless of service or method invoked) will change
+	// token_count as follows:
+	//
+	//   - Every failed RPC will decrement the token_count by 1.
+	//   - Every successful RPC will increment the token_count by tokenRatio.
+	//
+	// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+	// be retried and hedged RPCs will not be sent.
+	retryThrottling *retryThrottlingPolicy
+	// healthCheckConfig must be set as one of the requirements to enable LB
+	// channel health checking.
+	healthCheckConfig *healthCheckConfig
+	// rawJSONString stores the service config JSON string that was parsed into
+	// this service config struct.
+	rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+	// serviceName is the service name to use in the health-checking request.
+	ServiceName string
+}
+
+type jsonRetryPolicy struct {
+	MaxAttempts          int
+	InitialBackoff       string
+	MaxBackoff           string
+	BackoffMultiplier    float64
+	RetryableStatusCodes []codes.Code
+}
+
+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+	// The number of tokens starts at maxTokens. The token_count will always be
+	// between 0 and maxTokens.
+	//
+	// This field is required and must be greater than zero.
+	MaxTokens float64
+	// The amount of tokens to add on each successful RPC. Typically this will
+	// be some number between 0 and 1, e.g., 0.1.
+	//
+	// This field is required and must be greater than zero. Up to 3 decimal
+	// places are supported.
+	TokenRatio float64
+}
+
+func parseDuration(s *string) (*time.Duration, error) {
+	if s == nil {
+		return nil, nil
+	}
+	if !strings.HasSuffix(*s, "s") {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
+	if len(ss) > 2 {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+	hasDigits := false
+	var d time.Duration
+	if len(ss[0]) > 0 {
+		i, err := strconv.ParseInt(ss[0], 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+		}
+		d = time.Duration(i) * time.Second
+		hasDigits = true
+	}
+	if len(ss) == 2 && len(ss[1]) > 0 {
+		if len(ss[1]) > 9 {
+			return nil, fmt.Errorf("malformed duration %q", *s)
+		}
+		f, err := strconv.ParseInt(ss[1], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+		}
+		for i := 9; i > len(ss[1]); i-- {
+			f *= 10
+		}
+		d += time.Duration(f)
+		hasDigits = true
+	}
+	if !hasDigits {
+		return nil, fmt.Errorf("malformed duration %q", *s)
+	}
+
+	return &d, nil
+}
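parseDuration accepts the JSON encoding of a protobuf Duration: decimal seconds with a mandatory `s` suffix, an optional whole part, and at most nine fractional digits, where at least one of the two parts must be present. A few illustrative inputs checked against those rules (the string sets here are examples, not an exhaustive specification):

```go
package main

import "fmt"

func main() {
	// Accepted by the rules above: whole and/or fractional seconds, "s" suffix.
	valid := []string{"1s", "0.5s", ".25s", "3.000000001s"}
	// Rejected: wrong unit suffix, no digits, extra dots, >9 fractional digits.
	invalid := []string{"100ms", "1m", "s", "1.2.3s", "1.0000000001s"}
	fmt.Println(valid, invalid)
}
```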
+
+type jsonName struct {
+	Service string
+	Method  string
+}
+
+var (
+	errDuplicatedName             = errors.New("duplicated name")
+	errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+func (j jsonName) generatePath() (string, error) {
+	if j.Service == "" {
+		if j.Method != "" {
+			return "", errEmptyServiceNonEmptyMethod
+		}
+		return "", nil
+	}
+	res := "/" + j.Service + "/"
+	if j.Method != "" {
+		res += j.Method
+	}
+	return res, nil
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonMC struct {
+	Name                    *[]jsonName
+	WaitForReady            *bool
+	Timeout                 *string
+	MaxRequestMessageBytes  *int64
+	MaxResponseMessageBytes *int64
+	RetryPolicy             *jsonRetryPolicy
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonSC struct {
+	LoadBalancingPolicy *string
+	LoadBalancingConfig *internalserviceconfig.BalancerConfig
+	MethodConfig        *[]jsonMC
+	RetryThrottling     *retryThrottlingPolicy
+	HealthCheckConfig   *healthCheckConfig
+}
+
+func init() {
+	internal.ParseServiceConfig = parseServiceConfig
+}
+func parseServiceConfig(js string) *serviceconfig.ParseResult {
+	if len(js) == 0 {
+		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
+	}
+	var rsc jsonSC
+	err := json.Unmarshal([]byte(js), &rsc)
+	if err != nil {
+		logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc := ServiceConfig{
+		LB:                rsc.LoadBalancingPolicy,
+		Methods:           make(map[string]MethodConfig),
+		retryThrottling:   rsc.RetryThrottling,
+		healthCheckConfig: rsc.HealthCheckConfig,
+		rawJSONString:     js,
+	}
+	if c := rsc.LoadBalancingConfig; c != nil {
+		sc.lbConfig = &lbConfig{
+			name: c.Name,
+			cfg:  c.Config,
+		}
+	}
+
+	if rsc.MethodConfig == nil {
+		return &serviceconfig.ParseResult{Config: &sc}
+	}
+
+	paths := map[string]struct{}{}
+	for _, m := range *rsc.MethodConfig {
+		if m.Name == nil {
+			continue
+		}
+		d, err := parseDuration(m.Timeout)
+		if err != nil {
+			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+			return &serviceconfig.ParseResult{Err: err}
+		}
+
+		mc := MethodConfig{
+			WaitForReady: m.WaitForReady,
+			Timeout:      d,
+		}
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		if m.MaxRequestMessageBytes != nil {
+			if *m.MaxRequestMessageBytes > int64(maxInt) {
+				mc.MaxReqSize = newInt(maxInt)
+			} else {
+				mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+			}
+		}
+		if m.MaxResponseMessageBytes != nil {
+			if *m.MaxResponseMessageBytes > int64(maxInt) {
+				mc.MaxRespSize = newInt(maxInt)
+			} else {
+				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+			}
+		}
+		for i, n := range *m.Name {
+			path, err := n.generatePath()
+			if err != nil {
+				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+
+			if _, ok := paths[path]; ok {
+				err = errDuplicatedName
+				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+			paths[path] = struct{}{}
+			sc.Methods[path] = mc
+		}
+	}
+
+	if sc.retryThrottling != nil {
+		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
+		}
+		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) must be greater than zero", tr)}
+		}
+	}
+	return &serviceconfig.ParseResult{Config: &sc}
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
+	if jrp == nil {
+		return nil, nil
+	}
+	ib, err := parseDuration(&jrp.InitialBackoff)
+	if err != nil {
+		return nil, err
+	}
+	mb, err := parseDuration(&jrp.MaxBackoff)
+	if err != nil {
+		return nil, err
+	}
+
+	if jrp.MaxAttempts <= 1 ||
+		*ib <= 0 ||
+		*mb <= 0 ||
+		jrp.BackoffMultiplier <= 0 ||
+		len(jrp.RetryableStatusCodes) == 0 {
+		logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+		return nil, nil
+	}
+
+	rp := &internalserviceconfig.RetryPolicy{
+		MaxAttempts:          jrp.MaxAttempts,
+		InitialBackoff:       *ib,
+		MaxBackoff:           *mb,
+		BackoffMultiplier:    jrp.BackoffMultiplier,
+		RetryableStatusCodes: make(map[codes.Code]bool),
+	}
+	if rp.MaxAttempts > 5 {
+		// TODO(retry): Make the max maxAttempts configurable.
+		rp.MaxAttempts = 5
+	}
+	for _, code := range jrp.RetryableStatusCodes {
+		rp.RetryableStatusCodes[code] = true
+	}
+	return rp, nil
+}
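End to end, parseServiceConfig and convertRetryPolicy accept JSON like the following. A hedged sketch of wiring such a config into a client through grpc.WithDefaultServiceConfig; the target address, service, and method names are hypothetical:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// One method config with a timeout in the decimal-seconds format parsed
	// by parseDuration, plus a retry policy handled by convertRetryPolicy.
	sc := `{
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
	    "timeout": "0.5s",
	    "retryPolicy": {
	      "maxAttempts": 4,
	      "initialBackoff": "0.1s",
	      "maxBackoff": "1s",
	      "backoffMultiplier": 2.0,
	      "retryableStatusCodes": ["UNAVAILABLE"]
	    }
	  }]
	}`
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```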
+
+func min(a, b *int) *int {
+	if *a < *b {
+		return a
+	}
+	return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+	if mcMax == nil && doptMax == nil {
+		return &defaultVal
+	}
+	if mcMax != nil && doptMax != nil {
+		return min(mcMax, doptMax)
+	}
+	if mcMax != nil {
+		return mcMax
+	}
+	return doptMax
+}
+
+func newInt(b int) *int {
+	return &b
+}
+
+func init() {
+	internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because the two strings may differ only in whitespace.
+//
+// If either of them is not a *ServiceConfig, it returns false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	aa, ok := a.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	bb, ok := b.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	aaRaw := aa.rawJSONString
+	aa.rawJSONString = ""
+	bbRaw := bb.rawJSONString
+	bb.rawJSONString = ""
+	defer func() {
+		aa.rawJSONString = aaRaw
+		bb.rawJSONString = bbRaw
+	}()
+	// Using reflect.DeepEqual instead of cmp.Equal because many balancer
+	// configs are unexported, and cmp.Equal cannot compare unexported fields
+	// from unexported structs.
+	return reflect.DeepEqual(aa, bb)
+}
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
new file mode 100644
index 000000000..35e7a20a0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig defines types and methods for operating on gRPC
+// service configs.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package serviceconfig
+
+// Config represents an opaque data structure holding a service config.
+type Config interface {
+	isServiceConfig()
+}
+
+// LoadBalancingConfig represents an opaque data structure holding a load
+// balancing config.
+type LoadBalancingConfig interface {
+	isLoadBalancingConfig()
+}
+
+// ParseResult contains a service config or an error.  Exactly one must be
+// non-nil.
+type ParseResult struct {
+	Config Config
+	Err    error
+}
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
new file mode 100644
index 000000000..dc03731e4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"context"
+	"net"
+)
+
+// ConnTagInfo defines the relevant information needed by connection context tagger.
+type ConnTagInfo struct {
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// RPCTagInfo defines the relevant information needed by RPC context tagger.
+type RPCTagInfo struct {
+	// FullMethodName is the RPC method in the format of /package.service/method.
+	FullMethodName string
+	// FailFast indicates if this RPC is failfast.
+	// This field is only valid on client side, it's always false on server side.
+	FailFast bool
+}
+
+// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
+type Handler interface {
+	// TagRPC can attach some information to the given context.
+	// The context used for the remaining lifetime of the RPC will be derived from
+	// the returned context.
+	TagRPC(context.Context, *RPCTagInfo) context.Context
+	// HandleRPC processes the RPC stats.
+	HandleRPC(context.Context, RPCStats)
+
+	// TagConn can attach some information to the given context.
+	// The returned context will be used for stats handling.
+	// For conn stats handling, the context used in HandleConn for this
+	// connection will be derived from the context returned.
+	// For RPC stats handling,
+	//  - On server side, the context used in HandleRPC for all RPCs on this
+	//    connection will be derived from the context returned.
+	//  - On client side, the context is not derived from the context returned.
+	TagConn(context.Context, *ConnTagInfo) context.Context
+	// HandleConn processes the Conn stats.
+	HandleConn(context.Context, ConnStats)
+}
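A minimal stats.Handler that logs RPC durations, to make the tagging/handling split concrete: TagRPC derives the context that every subsequent HandleRPC call for that RPC observes. `rpcLogger` and its context key are illustrative, not part of this patch:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type rpcKey struct{}

// rpcLogger is a hypothetical stats.Handler that logs how long each RPC took.
type rpcLogger struct{}

// TagRPC derives the context that every later HandleRPC call for this RPC sees.
func (rpcLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return context.WithValue(ctx, rpcKey{}, info.FullMethodName)
}

// HandleRPC receives every RPC lifecycle event; here only End is of interest.
func (rpcLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		method, _ := ctx.Value(rpcKey{}).(string)
		log.Printf("%s took %v (err=%v)", method, end.EndTime.Sub(end.BeginTime), end.Error)
	}
}

func (rpcLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (rpcLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	s := grpc.NewServer(grpc.StatsHandler(rpcLogger{}))
	defer s.Stop()
}
```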
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
new file mode 100644
index 000000000..0285dcc6a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -0,0 +1,319 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purposes only. All fields are read-only.
+// All APIs are experimental.
+package stats // import "google.golang.org/grpc/stats"
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// RPCStats contains stats information about RPCs.
+type RPCStats interface {
+	isRPCStats()
+	// IsClient returns true if this RPCStats is from client side.
+	IsClient() bool
+}
+
+// Begin contains stats when an RPC attempt begins.
+// FailFast is only valid if this Begin is from client side.
+type Begin struct {
+	// Client is true if this Begin is from client side.
+	Client bool
+	// BeginTime is the time when the RPC attempt begins.
+	BeginTime time.Time
+	// FailFast indicates if this RPC is failfast.
+	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *Begin) IsClient() bool { return s.Client }
+
+func (s *Begin) isRPCStats() {}
+
+// InPayload contains the information for an incoming payload.
+type InPayload struct {
+	// Client is true if this InPayload is from client side.
+	Client bool
+	// Payload is the payload with original type.
+	Payload interface{}
+	// Data is the serialized message payload.
+	Data []byte
+	// Length is the length of uncompressed data.
+	Length int
+	// WireLength is the length of the data on the wire (compressed, signed, encrypted).
+	WireLength int
+	// RecvTime is the time when the payload is received.
+	RecvTime time.Time
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InPayload) IsClient() bool { return s.Client }
+
+func (s *InPayload) isRPCStats() {}
+
+// InHeader contains stats when a header is received.
+type InHeader struct {
+	// Client is true if this InHeader is from client side.
+	Client bool
+	// WireLength is the wire length of header.
+	WireLength int
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is false.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InHeader) IsClient() bool { return s.Client }
+
+func (s *InHeader) isRPCStats() {}
+
+// InTrailer contains stats when a trailer is received.
+type InTrailer struct {
+	// Client is true if this InTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of trailer.
+	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InTrailer) IsClient() bool { return s.Client }
+
+func (s *InTrailer) isRPCStats() {}
+
+// OutPayload contains the information for an outgoing payload.
+type OutPayload struct {
+	// Client is true if this OutPayload is from client side.
+	Client bool
+	// Payload is the payload with original type.
+	Payload interface{}
+	// Data is the serialized message payload.
+	Data []byte
+	// Length is the length of uncompressed data.
+	Length int
+	// WireLength is the length of the data on the wire (compressed, signed, encrypted).
+	WireLength int
+	// SentTime is the time when the payload is sent.
+	SentTime time.Time
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutPayload) IsClient() bool { return s.Client }
+
+func (s *OutPayload) isRPCStats() {}
+
+// OutHeader contains stats when a header is sent.
+type OutHeader struct {
+	// Client is true if this OutHeader is from client side.
+	Client bool
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is true.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutHeader) IsClient() bool { return s.Client }
+
+func (s *OutHeader) isRPCStats() {}
+
+// OutTrailer contains stats when a trailer is sent.
+type OutTrailer struct {
+	// Client is true if this OutTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of trailer.
+	//
+	// Deprecated: This field is never set. The length is not known when this
+	// message is emitted because the trailer fields are compressed with HPACK afterwards.
+	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutTrailer) IsClient() bool { return s.Client }
+
+func (s *OutTrailer) isRPCStats() {}
+
+// End contains stats when an RPC ends.
+type End struct {
+	// Client is true if this End is from client side.
+	Client bool
+	// BeginTime is the time when the RPC began.
+	BeginTime time.Time
+	// EndTime is the time when the RPC ends.
+	EndTime time.Time
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this End is from the client side.
+	// Deprecated: use Trailer in InTrailer instead.
+	Trailer metadata.MD
+	// Error is the error the RPC ended with. It is an error generated from
+	// status.Status and can be converted back to status.Status using
+	// status.FromError if non-nil.
+	Error error
+}
+
+// IsClient indicates if this is from client side.
+func (s *End) IsClient() bool { return s.Client }
+
+func (s *End) isRPCStats() {}
+
+// ConnStats contains stats information about connections.
+type ConnStats interface {
+	isConnStats()
+	// IsClient returns true if this ConnStats is from client side.
+	IsClient() bool
+}
+
+// ConnBegin contains the stats of a connection when it is established.
+type ConnBegin struct {
+	// Client is true if this ConnBegin is from client side.
+	Client bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *ConnBegin) IsClient() bool { return s.Client }
+
+func (s *ConnBegin) isConnStats() {}
+
+// ConnEnd contains the stats of a connection when it ends.
+type ConnEnd struct {
+	// Client is true if this ConnEnd is from client side.
+	Client bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *ConnEnd) IsClient() bool { return s.Client }
+
+func (s *ConnEnd) isConnStats() {}
+
+type incomingTagsKey struct{}
+type outgoingTagsKey struct{}
+
+// SetTags attaches stats tagging data to the context, which will be sent in
+// the outgoing RPC with the header grpc-tags-bin.  Subsequent calls to
+// SetTags will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func SetTags(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, outgoingTagsKey{}, b)
+}
+
+// Tags returns the tags from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func Tags(ctx context.Context) []byte {
+	b, _ := ctx.Value(incomingTagsKey{}).([]byte)
+	return b
+}
+
+// SetIncomingTags attaches stats tagging data to the context, to be read by
+// the application (not sent in outgoing RPCs).
+//
+// This is intended for gRPC-internal use ONLY.
+func SetIncomingTags(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, incomingTagsKey{}, b)
+}
+
+// OutgoingTags returns the tags from the context for the outbound RPC.
+//
+// This is intended for gRPC-internal use ONLY.
+func OutgoingTags(ctx context.Context) []byte {
+	b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
+	return b
+}
+
+type incomingTraceKey struct{}
+type outgoingTraceKey struct{}
+
+// SetTrace attaches tracing data to the context, which will be sent in
+// the outgoing RPC with the header grpc-trace-bin.  Subsequent calls to
+// SetTrace will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func SetTrace(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, outgoingTraceKey{}, b)
+}
+
+// Trace returns the trace from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func Trace(ctx context.Context) []byte {
+	b, _ := ctx.Value(incomingTraceKey{}).([]byte)
+	return b
+}
+
+// SetIncomingTrace attaches tracing data to the context, to be read by
+// the application (not sent in outgoing RPCs).  It is intended for
+// gRPC-internal use.
+func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, incomingTraceKey{}, b)
+}
+
+// OutgoingTrace returns the trace from the context for the outbound RPC.  It is
+// intended for gRPC-internal use.
+func OutgoingTrace(ctx context.Context) []byte {
+	b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
+	return b
+}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
new file mode 100644
index 000000000..623be39f2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC.  These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto.  gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/status"
+)
+
+// Status references google.golang.org/grpc/internal/status. It represents an
+// RPC status code, message, and details.  It is immutable and should be
+// created with New, Newf, or FromProto.
+// https://godoc.org/google.golang.org/grpc/internal/status
+type Status = status.Status
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+	return status.New(c, msg)
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...interface{}) *Status {
+	return New(c, fmt.Sprintf(format, a...))
+}
+
+// Error returns an error representing c and msg.  If c is OK, returns nil.
+func Error(c codes.Code, msg string) error {
+	return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+	return Error(c, fmt.Sprintf(format, a...))
+}
+
+// ErrorProto returns an error representing s.  If s.Code is OK, returns nil.
+func ErrorProto(s *spb.Status) error {
+	return FromProto(s).Err()
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+	return status.FromProto(s)
+}
+
+// FromError returns a Status representation of err.
+//
+//   - If err was produced by this package or implements the method `GRPCStatus()
+//     *Status`, the appropriate Status is returned.
+//
+//   - If err is nil, a Status is returned with codes.OK and no message.
+//
+//   - Otherwise, err is an error not compatible with this package.  In this
+//     case, a Status is returned with codes.Unknown and err's Error() message,
+//     and ok is false.
+func FromError(err error) (s *Status, ok bool) {
+	if err == nil {
+		return nil, true
+	}
+	if se, ok := err.(interface {
+		GRPCStatus() *Status
+	}); ok {
+		return se.GRPCStatus(), true
+	}
+	return New(codes.Unknown, err.Error()), false
+}
+
+// Convert is a convenience function which removes the need to handle the
+// boolean return value from FromError.
+func Convert(err error) *Status {
+	s, _ := FromError(err)
+	return s
+}
+
+// Code returns the Code of the error if it is a Status error, codes.OK if err
+// is nil, or codes.Unknown otherwise.
+func Code(err error) codes.Code {
+	// Don't use FromError to avoid allocation of OK status.
+	if err == nil {
+		return codes.OK
+	}
+	if se, ok := err.(interface {
+		GRPCStatus() *Status
+	}); ok {
+		return se.GRPCStatus().Code()
+	}
+	return codes.Unknown
+}
+
+// FromContextError converts a context error or wrapped context error into a
+// Status.  It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
+func FromContextError(err error) *Status {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, context.DeadlineExceeded) {
+		return New(codes.DeadlineExceeded, err.Error())
+	}
+	if errors.Is(err, context.Canceled) {
+		return New(codes.Canceled, err.Error())
+	}
+	return New(codes.Unknown, err.Error())
+}
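A short sketch of the package's round trip: a handler builds a status error, the caller recovers the code and message, and context errors map onto canonical codes. The code, message, and timeout below are arbitrary:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Build a status error as a server handler would return it.
	err := status.Errorf(codes.NotFound, "user %d not found", 42)

	// Recover the code and message on the receiving side.
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound user 42 not found
	}
	fmt.Println(status.Code(err) == codes.NotFound) // true

	// Context errors convert to the corresponding canonical codes.
	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	<-ctx.Done()
	fmt.Println(status.FromContextError(ctx.Err()).Code()) // DeadlineExceeded
}
```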
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
new file mode 100644
index 000000000..93231af2a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -0,0 +1,1740 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"io"
+	"math"
+	"strconv"
+	"sync"
+	"time"
+
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/internal/balancerload"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/serviceconfig"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// StreamHandler defines the handler called by gRPC server to complete the
+// execution of a streaming RPC.
+//
+// If a StreamHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type StreamHandler func(srv interface{}, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.  Used
+// on the server when registering services and on the client when initiating
+// new streams.
+type StreamDesc struct {
+	// StreamName and Handler are only used when registering handlers on a
+	// server.
+	StreamName string        // the name of the method excluding the service
+	Handler    StreamHandler // the handler called for the method
+
+	// ServerStreams and ClientStreams are used for registering handlers on a
+	// server as well as defining RPC behavior when passed to NewClientStream
+	// and ClientConn.NewStream.  At least one must be true.
+	ServerStreams bool // indicates the server can perform streaming sends
+	ClientStreams bool // indicates the client can perform streaming sends
+}
+
+// Stream defines the common interface a client or server stream has to satisfy.
+//
+// Deprecated: See ClientStream and ServerStream documentation instead.
+type Stream interface {
+	// Deprecated: See ClientStream and ServerStream documentation instead.
+	Context() context.Context
+	// Deprecated: See ClientStream and ServerStream documentation instead.
+	SendMsg(m interface{}) error
+	// Deprecated: See ClientStream and ServerStream documentation instead.
+	RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the client-side behavior of a streaming RPC.
+//
+// All errors returned from ClientStream methods are compatible with the
+// status package.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered. It is also not safe to call CloseSend
+	// concurrently with SendMsg.
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
+
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the stream returned, one of the following
+// actions must be performed:
+//
+//  1. Call Close on the ClientConn.
+//  2. Cancel the context provided.
+//  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+//     client-streaming RPC, for instance, might use the helper function
+//     CloseAndRecv (note that CloseSend does not Recv, therefore is not
+//     guaranteed to release all resources).
+//  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	// allow interceptor to see all applicable call options, which means those
+	// configured as defaults from dial option as well as per-call options
+	opts = combine(cc.dopts.callOptions, opts)
+
+	if cc.dopts.streamInt != nil {
+		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
+	}
+	return newClientStream(ctx, desc, cc, method, opts...)
+}
+
+// NewClientStream is a wrapper for ClientConn.NewStream.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+	return cc.NewStream(ctx, desc, method, opts...)
+}
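Of the cleanup rules listed above, rule 3 is the one generated client code relies on: after the final send, keep calling RecvMsg until it fails. A hedged helper illustrating that loop; `newMsg` stands in for a factory producing the generated protobuf message type:

```go
package main

import (
	"io"

	"google.golang.org/grpc"
)

// drain illustrates rule 3 above: call RecvMsg until it returns a non-nil
// error. io.EOF means the stream completed cleanly; any other error carries
// the RPC status. Either way, the stream's resources are released.
func drain(cs grpc.ClientStream, newMsg func() interface{}) error {
	for {
		if err := cs.RecvMsg(newMsg()); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err // RPC status error; the stream is already torn down
		}
	}
}

func main() { _ = drain }
```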
+
+func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+		if err := imetadata.Validate(md); err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
+	}
+	if channelz.IsOn() {
+		cc.incrCallsStarted()
+		defer func() {
+			if err != nil {
+				cc.incrCallsFailed()
+			}
+		}()
+	}
+	// Provide an opportunity for the first RPC to see the first service config
+	// provided by the resolver.
+	if err := cc.waitForResolvedAddrs(ctx); err != nil {
+		return nil, err
+	}
+
+	var mc serviceconfig.MethodConfig
+	var onCommit func()
+	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
+	}
+
+	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
+	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
+	if err != nil {
+		if st, ok := status.FromError(err); ok {
+			// Restrict the code to the list allowed by gRFC A54.
+			if istatus.IsRestrictedControlPlaneCode(st) {
+				err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
+			}
+			return nil, err
+		}
+		return nil, toRPCErr(err)
+	}
+
+	if rpcConfig != nil {
+		if rpcConfig.Context != nil {
+			ctx = rpcConfig.Context
+		}
+		mc = rpcConfig.MethodConfig
+		onCommit = rpcConfig.OnCommitted
+		if rpcConfig.Interceptor != nil {
+			rpcInfo.Context = nil
+			ns := newStream
+			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
+				if err != nil {
+					return nil, toRPCErr(err)
+				}
+				return cs, nil
+			}
+		}
+	}
+
+	return newStream(ctx, func() {})
+}
+
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
+	c := defaultCallInfo()
+	if mc.WaitForReady != nil {
+		c.failFast = !*mc.WaitForReady
+	}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	var cancel context.CancelFunc
+	if mc.Timeout != nil && *mc.Timeout >= 0 {
+		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+	} else {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+		DoneFunc:       doneFunc,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set.  In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if cc.dopts.cp != nil {
+		callHdr.SendCompress = cc.dopts.cp.Type()
+		cp = cc.dopts.cp
+	}
+	if c.creds != nil {
+		callHdr.Creds = c.creds
+	}
+
+	cs := &clientStream{
+		callHdr:      callHdr,
+		ctx:          ctx,
+		methodConfig: &mc,
+		opts:         opts,
+		callInfo:     c,
+		cc:           cc,
+		desc:         desc,
+		codec:        c.codec,
+		cp:           cp,
+		comp:         comp,
+		cancel:       cancel,
+		firstAttempt: true,
+		onCommit:     onCommit,
+	}
+	if !cc.dopts.disableRetry {
+		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
+	}
+	if ml := binarylog.GetMethodLogger(method); ml != nil {
+		cs.binlogs = append(cs.binlogs, ml)
+	}
+	if cc.dopts.binaryLogger != nil {
+		if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
+			cs.binlogs = append(cs.binlogs, ml)
+		}
+	}
+
+	// Pick the transport to use and create a new stream on the transport.
+	// Assign cs.attempt upon success.
+	op := func(a *csAttempt) error {
+		if err := a.getTransport(); err != nil {
+			return err
+		}
+		if err := a.newStream(); err != nil {
+			return err
+		}
+		// Because this operation is always called either here (while creating
+		// the clientStream) or by the retry code while locked when replaying
+		// the operation, it is safe to access cs.attempt directly.
+		cs.attempt = a
+		return nil
+	}
+	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+		return nil, err
+	}
+
+	if len(cs.binlogs) != 0 {
+		md, _ := metadata.FromOutgoingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			OnClientSide: true,
+			Header:       md,
+			MethodName:   method,
+			Authority:    cs.cc.authority,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(logEntry)
+		}
+	}
+
+	if desc != unaryStreamDesc {
+		// Listen on cc and stream contexts to cleanup when the user closes the
+		// ClientConn or cancels the stream context.  In all other cases, an error
+		// should already be injected into the recv buffer by the transport, which
+		// the client will eventually receive, and then we will cancel the stream's
+		// context in clientStream.finish.
+		go func() {
+			select {
+			case <-cc.ctx.Done():
+				cs.finish(ErrClientConnClosing)
+			case <-ctx.Done():
+				cs.finish(toRPCErr(ctx.Err()))
+			}
+		}()
+	}
+	return cs, nil
+}
+
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
+	if err := cs.ctx.Err(); err != nil {
+		return nil, toRPCErr(err)
+	}
+	if err := cs.cc.ctx.Err(); err != nil {
+		return nil, ErrClientConnClosing
+	}
+
+	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+	method := cs.callHdr.Method
+	var beginTime time.Time
+	shs := cs.cc.dopts.copts.StatsHandlers
+	for _, sh := range shs {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+		beginTime = time.Now()
+		begin := &stats.Begin{
+			Client:                    true,
+			BeginTime:                 beginTime,
+			FailFast:                  cs.callInfo.failFast,
+			IsClientStream:            cs.desc.ClientStreams,
+			IsServerStream:            cs.desc.ServerStreams,
+			IsTransparentRetryAttempt: isTransparent,
+		}
+		sh.HandleRPC(ctx, begin)
+	}
+
+	var trInfo *traceInfo
+	if EnableTracing {
+		trInfo = &traceInfo{
+			tr: trace.New("grpc.Sent."+methodFamily(method), method),
+			firstLine: firstLine{
+				client: true,
+			},
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			trInfo.firstLine.deadline = time.Until(deadline)
+		}
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		ctx = trace.NewContext(ctx, trInfo.tr)
+	}
+
+	if cs.cc.parsedTarget.URL.Scheme == "xds" {
+		// Add extra metadata (metadata that will be added by transport) to context
+		// so the balancer can see them.
+		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+
+	return &csAttempt{
+		ctx:           ctx,
+		beginTime:     beginTime,
+		cs:            cs,
+		dc:            cs.cc.dopts.dc,
+		statsHandlers: shs,
+		trInfo:        trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
+		return err
+	}
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+	}
+	return nil
+}
+
+func (a *csAttempt) newStream() error {
+	cs := a.cs
+	cs.callHdr.PreviousAttempts = cs.numRetries
+
+	// Merge metadata stored in PickResult, if any, with existing call metadata.
+	// It is safe to overwrite the csAttempt's context here, since all state
+	// maintained in it is local to the attempt. When the attempt has to be
+	// retried, a new instance of csAttempt will be created.
+	if a.pickResult.Metatada != nil {
+		// We currently do not have a function in the metadata package which
+		// merges given metadata with existing metadata in a context. Existing
+		// function `AppendToOutgoingContext()` takes a variadic argument of key
+		// value pairs.
+		//
+		// TODO: Make it possible to retrieve key value pairs from metadata.MD
+		// in a form passable to AppendToOutgoingContext(), or create a version
+		// of AppendToOutgoingContext() that accepts a metadata.MD.
+		md, _ := metadata.FromOutgoingContext(a.ctx)
+		md = metadata.Join(md, a.pickResult.Metatada)
+		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+	}
+
+	s, err := a.t.NewStream(a.ctx, cs.callHdr)
+	if err != nil {
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+			return err
+		}
+
+		if nse.AllowTransparentRetry {
+			a.allowTransparentRetry = true
+		}
+
+		// Unwrap and convert error.
+		return toRPCErr(nse.Err)
+	}
+	a.s = s
+	a.p = &parser{r: s}
+	return nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+	callHdr  *transport.CallHdr
+	opts     []CallOption
+	callInfo *callInfo
+	cc       *ClientConn
+	desc     *StreamDesc
+
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+
+	cancel context.CancelFunc // cancels all attempts
+
+	sentLast bool // sent an end stream
+
+	methodConfig *MethodConfig
+
+	ctx context.Context // the application's context, wrapped by stats/tracing
+
+	retryThrottler *retryThrottler // The throttler active when the RPC began.
+
+	binlogs []binarylog.MethodLogger
+	// serverHeaderBinlogged records whether the server header has been
+	// logged. The server header is logged the first time one of the
+	// following happens: stream.Header() or stream.Recv().
+	//
+	// It's only read and used by Recv() and Header(), so it doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu                      sync.Mutex
+	firstAttempt            bool // if true, transparent retry is valid
+	numRetries              int  // exclusive of transparent retry attempt(s)
+	numRetriesSincePushback int  // retries since pushback; to reset backoff
+	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
+	// attempt is the active client stream attempt.
+	// The only place it is written is the newAttemptLocked method, which never writes nil.
+	// So attempt can be nil only inside newClientStream, when the clientStream is first created.
+	// One of the first things done after the clientStream's creation is to call newAttemptLocked,
+	// which either assigns a non-nil value to attempt or returns an error. If newAttemptLocked
+	// returns an error, newClientStream calls finish on the clientStream and returns. The finish
+	// method is therefore the only place that needs to check whether attempt is nil.
+	attempt *csAttempt
+	// TODO(hedging): hedging will have multiple attempts simultaneously.
+	committed  bool // active attempt committed for retry?
+	onCommit   func()
+	buffer     []func(a *csAttempt) error // operations to replay on retry
+	bufferSize int                        // current size of buffer
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+	ctx        context.Context
+	cs         *clientStream
+	t          transport.ClientTransport
+	s          *transport.Stream
+	p          *parser
+	pickResult balancer.PickResult
+
+	finished  bool
+	dc        Decompressor
+	decomp    encoding.Compressor
+	decompSet bool
+
+	mu sync.Mutex // guards trInfo.tr
+	// trInfo may be nil (if EnableTracing is false).
+	// trInfo.tr is set when created (if EnableTracing is true),
+	// and cleared when the finish method is called.
+	trInfo *traceInfo
+
+	statsHandlers []stats.Handler
+	beginTime     time.Time
+
+	// set for newStream errors that may be transparently retried
+	allowTransparentRetry bool
+	// set for pick errors that are returned as a status
+	drop bool
+}
+
+func (cs *clientStream) commitAttemptLocked() {
+	if !cs.committed && cs.onCommit != nil {
+		cs.onCommit()
+	}
+	cs.committed = true
+	cs.buffer = nil
+}
+
+func (cs *clientStream) commitAttempt() {
+	cs.mu.Lock()
+	cs.commitAttemptLocked()
+	cs.mu.Unlock()
+}
+
+// shouldRetry returns a nil error if the RPC should be retried; otherwise it
+// returns the error that should be returned by the operation.  If the RPC
+// should be retried, the bool indicates whether the retry is transparent.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+	cs := a.cs
+
+	if cs.finished || cs.committed || a.drop {
+		// RPC is finished or committed or was dropped by the picker; cannot retry.
+		return false, err
+	}
+	if a.s == nil && a.allowTransparentRetry {
+		return true, nil
+	}
+	// Wait for the trailers.
+	unprocessed := false
+	if a.s != nil {
+		<-a.s.Done()
+		unprocessed = a.s.Unprocessed()
+	}
+	if cs.firstAttempt && unprocessed {
+		// First attempt, stream unprocessed: transparently retry.
+		return true, nil
+	}
+	if cs.cc.dopts.disableRetry {
+		return false, err
+	}
+
+	pushback := 0
+	hasPushback := false
+	if a.s != nil {
+		if !a.s.TrailersOnly() {
+			return false, err
+		}
+
+		// TODO(retry): Move down if the spec changes to not check server pushback
+		// before considering this a failure for throttling.
+		sps := a.s.Trailer()["grpc-retry-pushback-ms"]
+		if len(sps) == 1 {
+			var e error
+			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
+				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
+				cs.retryThrottler.throttle() // This counts as a failure for throttling.
+				return false, err
+			}
+			hasPushback = true
+		} else if len(sps) > 1 {
+			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
+			cs.retryThrottler.throttle() // This counts as a failure for throttling.
+			return false, err
+		}
+	}
+
+	var code codes.Code
+	if a.s != nil {
+		code = a.s.Status().Code()
+	} else {
+		code = status.Code(err)
+	}
+
+	rp := cs.methodConfig.RetryPolicy
+	if rp == nil || !rp.RetryableStatusCodes[code] {
+		return false, err
+	}
+
+	// Note: the ordering here is important; we count this as a failure
+	// only if the code matched a retryable code.
+	if cs.retryThrottler.throttle() {
+		return false, err
+	}
+	if cs.numRetries+1 >= rp.MaxAttempts {
+		return false, err
+	}
+
+	var dur time.Duration
+	if hasPushback {
+		dur = time.Millisecond * time.Duration(pushback)
+		cs.numRetriesSincePushback = 0
+	} else {
+		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := float64(rp.InitialBackoff) * fact
+		if max := float64(rp.MaxBackoff); cur > max {
+			cur = max
+		}
+		dur = time.Duration(grpcrand.Int63n(int64(cur)))
+		cs.numRetriesSincePushback++
+	}
+
+	// TODO(dfawley): we could eagerly fail here if dur puts us past the
+	// deadline, but unsure if it is worth doing.
+	t := time.NewTimer(dur)
+	select {
+	case <-t.C:
+		cs.numRetries++
+		return false, nil
+	case <-cs.ctx.Done():
+		t.Stop()
+		return false, status.FromContextError(cs.ctx.Err()).Err()
+	}
+}
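+
+// Illustrative sketch (not part of the original source) of the backoff chosen
+// above: absent server pushback, the delay is uniformly jittered exponential
+// backoff capped at MaxBackoff; with pushback, the server-supplied delay from
+// the "grpc-retry-pushback-ms" trailer is used verbatim:
+//
+//	fact := math.Pow(rp.BackoffMultiplier, float64(numRetriesSincePushback))
+//	cur := float64(rp.InitialBackoff) * fact
+//	if max := float64(rp.MaxBackoff); cur > max {
+//		cur = max
+//	}
+//	dur := time.Duration(grpcrand.Int63n(int64(cur))) // uniform in [0, cur)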
+
+// Returns nil if a retry was performed and succeeded; error otherwise.
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
+	for {
+		attempt.finish(toRPCErr(lastErr))
+		isTransparent, err := attempt.shouldRetry(lastErr)
+		if err != nil {
+			cs.commitAttemptLocked()
+			return err
+		}
+		cs.firstAttempt = false
+		attempt, err = cs.newAttemptLocked(isTransparent)
+		if err != nil {
+			// Only returns error if the clientconn is closed or the context of
+			// the stream is canceled.
+			return err
+		}
+		// Note that the first op in the replay buffer always sets cs.attempt
+		// if it is able to pick a transport and create a stream.
+		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
+			return nil
+		}
+	}
+}
+
+func (cs *clientStream) Context() context.Context {
+	cs.commitAttempt()
+	// No need to lock before using attempt, since we know it is committed and
+	// cannot change.
+	if cs.attempt.s != nil {
+		return cs.attempt.s.Context()
+	}
+	return cs.ctx
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+	cs.mu.Lock()
+	for {
+		if cs.committed {
+			cs.mu.Unlock()
+			// toRPCErr is used in case the error from the attempt comes from
+			// NewClientStream, which intentionally doesn't return a status
+			// error to allow for further inspection; all other errors should
+			// already be status errors.
+			return toRPCErr(op(cs.attempt))
+		}
+		if len(cs.buffer) == 0 {
+			// For the first op, which controls creation of the stream and
+			// assigns cs.attempt, we need to create a new attempt inline
+			// before executing the first op.  On subsequent ops, the attempt
+			// is created immediately before replaying the ops.
+			var err error
+			if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
+				cs.mu.Unlock()
+				cs.finish(err)
+				return err
+			}
+		}
+		a := cs.attempt
+		cs.mu.Unlock()
+		err := op(a)
+		cs.mu.Lock()
+		if a != cs.attempt {
+			// We started another attempt already.
+			continue
+		}
+		if err == io.EOF {
+			<-a.s.Done()
+		}
+		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+			onSuccess()
+			cs.mu.Unlock()
+			return err
+		}
+		if err := cs.retryLocked(a, err); err != nil {
+			cs.mu.Unlock()
+			return err
+		}
+	}
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+	var m metadata.MD
+	noHeader := false
+	err := cs.withRetry(func(a *csAttempt) error {
+		var err error
+		m, err = a.s.Header()
+		if err == transport.ErrNoHeaders {
+			noHeader = true
+			return nil
+		}
+		return toRPCErr(err)
+	}, cs.commitAttemptLocked)
+
+	if err != nil {
+		cs.finish(err)
+		return nil, err
+	}
+
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
+		// Only log if binary log is on and header has not been logged, and
+		// there is actually headers to log.
+		logEntry := &binarylog.ServerHeader{
+			OnClientSide: true,
+			Header:       m,
+			PeerAddr:     nil,
+		}
+		if peer, ok := peer.FromContext(cs.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		cs.serverHeaderBinlogged = true
+		for _, binlog := range cs.binlogs {
+			binlog.Log(logEntry)
+		}
+	}
+	return m, nil
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+	// On RPC failure, we never need to retry, because valid usage requires
+	// RecvMsg() to have returned a non-nil error before this function is
+	// called.  We would have retried earlier if necessary.
+	//
+	// Commit the attempt anyway, just in case users are not following those
+	// directions -- it will prevent races and should not meaningfully impact
+	// performance.
+	cs.commitAttempt()
+	if cs.attempt.s == nil {
+		return nil
+	}
+	return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+	for _, f := range cs.buffer {
+		if err := f(attempt); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+	// Note: we still will buffer if retry is disabled (for transparent retries).
+	if cs.committed {
+		return
+	}
+	cs.bufferSize += sz
+	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+		cs.commitAttemptLocked()
+		return
+	}
+	cs.buffer = append(cs.buffer, op)
+}
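+
+// Illustrative sketch (not part of the original source): callers pair
+// withRetry with bufferForRetryLocked so the operation can be replayed on a
+// fresh attempt, as SendMsg does:
+//
+//	op := func(a *csAttempt) error { return a.sendMsg(m, hdr, payload, data) }
+//	err := cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })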
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			cs.finish(err)
+		}
+	}()
+	if cs.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !cs.desc.ClientStreams {
+		cs.sentLast = true
+	}
+
+	// load hdr, payload, data
+	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+	if err != nil {
+		return err
+	}
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > *cs.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+	}
+	op := func(a *csAttempt) error {
+		return a.sendMsg(m, hdr, payload, data)
+	}
+	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
+	if len(cs.binlogs) != 0 && err == nil {
+		cm := &binarylog.ClientMessage{
+			OnClientSide: true,
+			Message:      data,
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cm)
+		}
+	}
+	return err
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) error {
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
+		// Call Header() to binary log header if it's not already logged.
+		cs.Header()
+	}
+	var recvInfo *payloadInfo
+	if len(cs.binlogs) != 0 {
+		recvInfo = &payloadInfo{}
+	}
+	err := cs.withRetry(func(a *csAttempt) error {
+		return a.recvMsg(m, recvInfo)
+	}, cs.commitAttemptLocked)
+	if len(cs.binlogs) != 0 && err == nil {
+		sm := &binarylog.ServerMessage{
+			OnClientSide: true,
+			Message:      recvInfo.uncompressedBytes,
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(sm)
+		}
+	}
+	if err != nil || !cs.desc.ServerStreams {
+		// err != nil or non-server-streaming indicates end of stream.
+		cs.finish(err)
+
+		if len(cs.binlogs) != 0 {
+			// finish will not log Trailer. Log Trailer here.
+			logEntry := &binarylog.ServerTrailer{
+				OnClientSide: true,
+				Trailer:      cs.Trailer(),
+				Err:          err,
+			}
+			if logEntry.Err == io.EOF {
+				logEntry.Err = nil
+			}
+			if peer, ok := peer.FromContext(cs.Context()); ok {
+				logEntry.PeerAddr = peer.Addr
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(logEntry)
+			}
+		}
+	}
+	return err
+}
+
+func (cs *clientStream) CloseSend() error {
+	if cs.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	cs.sentLast = true
+	op := func(a *csAttempt) error {
+		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+		// Always return nil; io.EOF is the only error that might make sense
+		// instead, but there is no need to signal the client to call RecvMsg
+		// as the only use left for the stream after CloseSend is to call
+		// RecvMsg.  This also matches historical behavior.
+		return nil
+	}
+	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+	if len(cs.binlogs) != 0 {
+		chc := &binarylog.ClientHalfClose{
+			OnClientSide: true,
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(chc)
+		}
+	}
+	// We intentionally never return an error here; see the comment inside op
+	// above for the rationale.
+	return nil
+}
+
+func (cs *clientStream) finish(err error) {
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	cs.mu.Lock()
+	if cs.finished {
+		cs.mu.Unlock()
+		return
+	}
+	cs.finished = true
+	cs.commitAttemptLocked()
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+		// after functions all rely upon having a stream.
+		if cs.attempt.s != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo, cs.attempt)
+			}
+		}
+	}
+	cs.mu.Unlock()
+	// For binary logging: only log cancel in finish (could be caused by RPC ctx
+	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
+	//
+	// Only one of cancel or trailer needs to be logged. In the cases where
+	// users don't call RecvMsg, users must have already canceled the RPC.
+	if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled {
+		c := &binarylog.Cancel{
+			OnClientSide: true,
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(c)
+		}
+	}
+	if err == nil {
+		cs.retryThrottler.successfulRPC()
+	}
+	if channelz.IsOn() {
+		if err != nil {
+			cs.cc.incrCallsFailed()
+		} else {
+			cs.cc.incrCallsSucceeded()
+		}
+	}
+	cs.cancel()
+}
+
+func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+	cs := a.cs
+	if a.trInfo != nil {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+		if !cs.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it.  finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+	for _, sh := range a.statsHandlers {
+		sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+	cs := a.cs
+	if len(a.statsHandlers) != 0 && payInfo == nil {
+		payInfo = &payloadInfo{}
+	}
+
+	if !a.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if a.dc == nil || a.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				a.dc = nil
+				a.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			a.dc = nil
+		}
+		// Only initialize this state once per stream.
+		a.decompSet = true
+	}
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
+	if err != nil {
+		if err == io.EOF {
+			if statusErr := a.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+
+		return toRPCErr(err)
+	}
+	if a.trInfo != nil {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	for _, sh := range a.statsHandlers {
+		sh.HandleRPC(a.ctx, &stats.InPayload{
+			Client:   true,
+			RecvTime: time.Now(),
+			Payload:  m,
+			// TODO truncate large payload.
+			Data:       payInfo.uncompressedBytes,
+			WireLength: payInfo.wireLength + headerLen,
+			Length:     len(payInfo.uncompressedBytes),
+		})
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgRecv()
+	}
+	if cs.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	var tr metadata.MD
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+		tr = a.s.Trailer()
+	}
+
+	if a.pickResult.Done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
+		a.pickResult.Done(balancer.DoneInfo{
+			Err:           err,
+			Trailer:       tr,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+			ServerLoad:    balancerload.Parse(tr),
+		})
+	}
+	for _, sh := range a.statsHandlers {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.beginTime,
+			EndTime:   time.Now(),
+			Trailer:   tr,
+			Error:     err,
+		}
+		sh.HandleRPC(a.ctx, end)
+	}
+	if a.trInfo != nil && a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// newClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn,
+// or is already closed. To avoid a race, the transport is specified
+// separately instead of using ac.transport.
+//
+// Main difference between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+	if t == nil {
+		// TODO: return RPC error here?
+		return nil, errors.New("transport provided is nil")
+	}
+	// defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+	c := &callInfo{}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           ac.cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set.  In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if ac.cc.dopts.cp != nil {
+		callHdr.SendCompress = ac.cc.dopts.cp.Type()
+		cp = ac.cc.dopts.cp
+	}
+	if c.creds != nil {
+		callHdr.Creds = c.creds
+	}
+
+	// Use a special addrConnStream to avoid retry.
+	as := &addrConnStream{
+		callHdr:  callHdr,
+		ac:       ac,
+		ctx:      ctx,
+		cancel:   cancel,
+		opts:     opts,
+		callInfo: c,
+		desc:     desc,
+		codec:    c.codec,
+		cp:       cp,
+		comp:     comp,
+		t:        t,
+	}
+
+	s, err := as.t.NewStream(as.ctx, as.callHdr)
+	if err != nil {
+		err = toRPCErr(err)
+		return nil, err
+	}
+	as.s = s
+	as.p = &parser{r: s}
+	ac.incrCallsStarted()
+	if desc != unaryStreamDesc {
+		// Listen on cc and stream contexts to cleanup when the user closes the
+		// ClientConn or cancels the stream context.  In all other cases, an error
+		// should already be injected into the recv buffer by the transport, which
+		// the client will eventually receive, and then we will cancel the stream's
+		// context in clientStream.finish.
+		go func() {
+			select {
+			case <-ac.ctx.Done():
+				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
+			case <-ctx.Done():
+				as.finish(toRPCErr(ctx.Err()))
+			}
+		}()
+	}
+	return as, nil
+}
+
+type addrConnStream struct {
+	s         *transport.Stream
+	ac        *addrConn
+	callHdr   *transport.CallHdr
+	cancel    context.CancelFunc
+	opts      []CallOption
+	callInfo  *callInfo
+	t         transport.ClientTransport
+	ctx       context.Context
+	sentLast  bool
+	desc      *StreamDesc
+	codec     baseCodec
+	cp        Compressor
+	comp      encoding.Compressor
+	decompSet bool
+	dc        Decompressor
+	decomp    encoding.Compressor
+	p         *parser
+	mu        sync.Mutex
+	finished  bool
+}
+
+func (as *addrConnStream) Header() (metadata.MD, error) {
+	m, err := as.s.Header()
+	if err != nil {
+		as.finish(toRPCErr(err))
+	}
+	return m, err
+}
+
+func (as *addrConnStream) Trailer() metadata.MD {
+	return as.s.Trailer()
+}
+
+func (as *addrConnStream) CloseSend() error {
+	if as.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	as.sentLast = true
+
+	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+	// Always return nil; io.EOF is the only error that might make sense
+	// instead, but there is no need to signal the client to call RecvMsg
+	// as the only use left for the stream after CloseSend is to call
+	// RecvMsg.  This also matches historical behavior.
+	return nil
+}
+
+func (as *addrConnStream) Context() context.Context {
+	return as.s.Context()
+}
+
+func (as *addrConnStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			as.finish(err)
+		}
+	}()
+	if as.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !as.desc.ClientStreams {
+		as.sentLast = true
+	}
+
+	// load hdr, payload, data
+	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+	if err != nil {
+		return err
+	}
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payld) > *as.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+	}
+
+	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+		if !as.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it.  finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+	defer func() {
+		if err != nil || !as.desc.ServerStreams {
+			// err != nil or non-server-streaming indicates end of stream.
+			as.finish(err)
+		}
+	}()
+
+	if !as.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if as.dc == nil || as.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				as.dc = nil
+				as.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			as.dc = nil
+		}
+		// Only initialize this state once per stream.
+		as.decompSet = true
+	}
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err != nil {
+		if err == io.EOF {
+			if statusErr := as.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgRecv()
+	}
+	if as.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+	as.mu.Lock()
+	if as.finished {
+		as.mu.Unlock()
+		return
+	}
+	as.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if as.s != nil {
+		as.t.CloseStream(as.s, err)
+	}
+
+	if err != nil {
+		as.ac.incrCallsFailed()
+	} else {
+		as.ac.incrCallsSucceeded()
+	}
+	as.cancel()
+	as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package.  However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the client. An
+	// untimely stream closure may result in lost messages.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines.
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the client has performed a CloseSend. On
+	// any non-EOF error, the stream is aborted and the error contains the
+	// RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
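+
+// Illustrative sketch (not part of the original source): a bidirectional
+// handler written directly against ServerStream semantics, with pb.EchoRequest
+// and pb.EchoResponse assumed message types:
+//
+//	func echo(stream grpc.ServerStream) error {
+//		for {
+//			req := new(pb.EchoRequest)
+//			if err := stream.RecvMsg(req); err == io.EOF {
+//				return nil // client performed CloseSend
+//			} else if err != nil {
+//				return err // stream aborted; err carries the RPC status
+//			}
+//			if err := stream.SendMsg(&pb.EchoResponse{Message: req.Message}); err != nil {
+//				return err
+//			}
+//		}
+//	}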
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	trInfo                *traceInfo
+
+	statsHandler []stats.Handler
+
+	binlogs []binarylog.MethodLogger
+	// serverHeaderBinlogged indicates whether the server header has been
+	// logged. It is set the first time one of the following happens:
+	// stream.SendHeader() or stream.Send().
+	//
+	// It's only checked in send and sendHeader, so it doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+	return ss.ctx
+}
+
+func (ss *serverStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
+	return ss.s.SetHeader(md)
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	err = ss.t.WriteHeader(ss.s, md)
+	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
+		h, _ := ss.s.Header()
+		sh := &binarylog.ServerHeader{
+			Header: h,
+		}
+		ss.serverHeaderBinlogged = true
+		for _, binlog := range ss.binlogs {
+			binlog.Log(sh)
+		}
+	}
+	return err
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+	if md.Len() == 0 {
+		return
+	}
+	if err := imetadata.Validate(md); err != nil {
+		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+	}
+	ss.s.SetTrailer(md)
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+				} else {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+		if err != nil && err != io.EOF {
+			st, _ := status.FromError(toRPCErr(err))
+			ss.t.WriteStatus(ss.s, st)
+			// A status not specified by the user was sent out. This should be
+			// an error case (for example, a server-side Cancel).
+			//
+			// This is not handled specifically for now. The user will return a
+			// final status from the service handler; we will log that error
+			// instead. This behavior is similar to an interceptor.
+		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgSent()
+		}
+	}()
+
+	// load hdr, payload, data
+	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+	if err != nil {
+		return err
+	}
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if len(payload) > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+	}
+	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+		return toRPCErr(err)
+	}
+	if len(ss.binlogs) != 0 {
+		if !ss.serverHeaderBinlogged {
+			h, _ := ss.s.Header()
+			sh := &binarylog.ServerHeader{
+				Header: h,
+			}
+			ss.serverHeaderBinlogged = true
+			for _, binlog := range ss.binlogs {
+				binlog.Log(sh)
+			}
+		}
+		sm := &binarylog.ServerMessage{
+			Message: data,
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(sm)
+		}
+	}
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+		}
+	}
+	return nil
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+				} else if err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+		if err != nil && err != io.EOF {
+			st, _ := status.FromError(toRPCErr(err))
+			ss.t.WriteStatus(ss.s, st)
+			// A status not specified by the user was sent out. This should be
+			// an error case (for example, a server-side Cancel).
+			//
+			// This is not handled specifically for now. The user will return a
+			// final status from the service handler; we will log that error
+			// instead. This behavior is similar to an interceptor.
+		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgRecv()
+		}
+	}()
+	var payInfo *payloadInfo
+	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+		payInfo = &payloadInfo{}
+	}
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+		if err == io.EOF {
+			if len(ss.binlogs) != 0 {
+				chc := &binarylog.ClientHalfClose{}
+				for _, binlog := range ss.binlogs {
+					binlog.Log(chc)
+				}
+			}
+			return err
+		}
+		if err == io.ErrUnexpectedEOF {
+			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		}
+		return toRPCErr(err)
+	}
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
+				RecvTime: time.Now(),
+				Payload:  m,
+				// TODO truncate large payload.
+				Data:       payInfo.uncompressedBytes,
+				WireLength: payInfo.wireLength + headerLen,
+				Length:     len(payInfo.uncompressedBytes),
+			})
+		}
+	}
+	if len(ss.binlogs) != 0 {
+		cm := &binarylog.ClientMessage{
+			Message: payInfo.uncompressedBytes,
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(cm)
+		}
+	}
+	return nil
+}
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+	return Method(stream.Context())
+}
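+
+// For example (illustrative, not part of the original source), a stream
+// interceptor can recover the method name from a stream it did not create,
+// here logged with the standard library's log package:
+//
+//	if method, ok := grpc.MethodFromServerStream(ss); ok {
+//		log.Printf("handling %s", method) // e.g. "/pkg.Service/GetFoo"
+//	}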
+
+// prepareMsg returns the hdr, payload and data using the passed compressors,
+// or directly from the passed PreparedMsg if m is one.
+func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+	if preparedMsg, ok := m.(*PreparedMsg); ok {
+		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+	}
+	// The input interface is not a prepared msg.
+	// Marshal and compress the data at this point.
+	data, err = encode(codec, m)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	compData, err := compress(data, cp, comp)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	hdr, payload = msgHeader(data, compData)
+	return hdr, payload, data, nil
+}
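+
+// Illustrative sketch (not part of the original source) of how the three
+// return values relate: data is the uncompressed encoding of m, payload is
+// the (possibly compressed) bytes written to the wire, and hdr is the 5-byte
+// gRPC message header (1-byte compressed flag plus big-endian length):
+//
+//	hdr, payload, data, err := prepareMsg(m, codec, cp, comp)
+//	// hdr[0] == 1 iff payload is compressed
+//	// binary.BigEndian.Uint32(hdr[1:5]) == uint32(len(payload))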
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
new file mode 100644
index 000000000..bfa5dfa40
--- /dev/null
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package tap defines the function handlers that are executed on the
+// transport layer of gRPC-Go, along with related information.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package tap
+
+import (
+	"context"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+	// FullMethodName is the full gRPC method string (in the format
+	// /package.service/method).
+	FullMethodName string
+	// TODO: More to be added.
+}
+
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client.  If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
+//
+// It's intended to be used in situations where you don't want to waste the
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) instead of
+// per-RPC goroutine. Therefore, users should NOT have any
+// blocking/time-consuming work in this handle. Otherwise all the RPCs would
+// slow down. Also, for the same reason, this handle won't be called
+// concurrently by gRPC.
+type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
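+
+// Illustrative sketch (not part of the original source): a tap handle that
+// sheds load before stream creation, with limiter an assumed *rate.Limiter
+// (golang.org/x/time/rate), installed via the grpc.InTapHandle server option:
+//
+//	func rateLimitTap(ctx context.Context, info *Info) (context.Context, error) {
+//		if !limiter.Allow() {
+//			return nil, status.Errorf(codes.ResourceExhausted, "rejecting %q", info.FullMethodName)
+//		}
+//		return ctx, nil
+//	}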
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 000000000..07a2d26b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,123 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing bool
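+
+// For example (illustrative, not part of the original source), set it once at
+// startup, before any RPCs, so traces appear on the /debug/requests page
+// served by golang.org/x/net/trace:
+//
+//	func init() {
+//		grpc.EnableTracing = true
+//	}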
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "pkg.Service".
+func methodFamily(m string) string {
+	m = strings.TrimPrefix(m, "/") // remove leading slash
+	if i := strings.Index(m, "/"); i >= 0 {
+		m = m[:i] // remove everything from second slash
+	}
+	return m
+}
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+	tr        trace.Trace
+	firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+// It may be mutated after construction; remoteAddr specifically may change
+// during client-side use.
+type firstLine struct {
+	mu         sync.Mutex
+	client     bool // whether this is a client (outgoing) RPC
+	remoteAddr net.Addr
+	deadline   time.Duration // may be zero
+}
+
+func (f *firstLine) SetRemoteAddr(addr net.Addr) {
+	f.mu.Lock()
+	f.remoteAddr = addr
+	f.mu.Unlock()
+}
+
+func (f *firstLine) String() string {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	var line bytes.Buffer
+	io.WriteString(&line, "RPC: ")
+	if f.client {
+		io.WriteString(&line, "to")
+	} else {
+		io.WriteString(&line, "from")
+	}
+	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+	if f.deadline != 0 {
+		fmt.Fprint(&line, f.deadline)
+	} else {
+		io.WriteString(&line, "none")
+	}
+	return line.String()
+}
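+
+// For example (illustrative, not part of the original source), a client-side
+// first line renders as:
+//
+//	RPC: to 10.0.0.1:443 deadline:1s
+//
+// and ends with "deadline:none" when no deadline is set.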
+
+const truncateSize = 100
+
+func truncate(x string, l int) string {
+	if l > len(x) {
+		return x
+	}
+	return x[:l]
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+	sent bool        // whether this is an outgoing payload
+	msg  interface{} // e.g. a proto.Message
+	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+	if p.sent {
+		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
+	}
+	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
+}
+
+type fmtStringer struct {
+	format string
+	a      []interface{}
+}
+
+func (f *fmtStringer) String() string {
+	return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
new file mode 100644
index 000000000..fe552c315
--- /dev/null
+++ b/vendor/google.golang.org/grpc/version.go
@@ -0,0 +1,22 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// Version is the current grpc version.
+const Version = "1.53.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
new file mode 100644
index 000000000..3728aed04
--- /dev/null
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+
+set -ex  # Exit on error; debugging enabled.
+set -o pipefail  # Fail a pipe if any sub-command fails.
+
+# not makes sure the command passed to it does not exit with a return code of 0.
+not() {
+  # This is required instead of the earlier (! $COMMAND) because subshells and
+  # pipefail don't work the same on Darwin as in Linux.
+  ! "$@"
+}
+
+die() {
+  echo "$@" >&2
+  exit 1
+}
+
+fail_on_output() {
+  tee /dev/stderr | not read
+}
+
+# Check to make sure it's safe to modify the user's git repo.
+git status --porcelain | fail_on_output
+
+# Undo any edits made by this script.
+cleanup() {
+  git reset --hard HEAD
+}
+trap cleanup EXIT
+
+PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}"
+go version
+
+if [[ "$1" = "-install" ]]; then
+  # Install the pinned versions as defined in module tools.
+  pushd ./test/tools
+  go install \
+    golang.org/x/lint/golint \
+    golang.org/x/tools/cmd/goimports \
+    honnef.co/go/tools/cmd/staticcheck \
+    github.com/client9/misspell/cmd/misspell
+  popd
+  if [[ -z "${VET_SKIP_PROTO}" ]]; then
+    if [[ "${TRAVIS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.14.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/travis
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+      PROTOBUF_VERSION=3.14.0
+      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+      pushd /home/runner/go
+      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+      unzip ${PROTOC_FILENAME}
+      bin/protoc --version
+      popd
+    elif not which protoc > /dev/null; then
+      die "Please install protoc into your path"
+    fi
+  fi
+  exit 0
+elif [[ "$#" -ne 0 ]]; then
+  die "Unknown argument(s): $*"
+fi
+
+# - Check that generated proto files are up to date.
+if [[ -z "${VET_SKIP_PROTO}" ]]; then
+  PATH="/home/travis/bin:${PATH}" make proto && \
+    git status --porcelain 2>&1 | fail_on_output || \
+    (git status; git --no-pager diff; exit 1)
+fi
+
+if [[ -n "${VET_ONLY_PROTO}" ]]; then
+  exit 0
+fi
+
+# - Ensure all source files contain a copyright message.
+# (Done in two parts because Darwin "git grep" has broken support for compound
+# exclusion matches.)
+(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
+
+# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
+not grep 'func Test[^(]' *_test.go
+not grep 'func Test[^(]' test/*.go
+
+# - Do not import x/net/context.
+not git grep -l 'x/net/context' -- "*.go"
+
+# - Do not import math/rand for real library code.  Use internal/grpcrand for
+#   thread safety.
+git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
+
+# - Do not call grpclog directly. Use grpclog.Component instead.
+git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
+
+# - Ensure all ptypes proto packages are renamed when importing.
+not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
+
+# - Ensure all xds proto imports are renamed to *pb or *grpc.
+git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
+
+misspell -error .
+
+# - gofmt, goimports, golint (with exceptions for generated code), go vet,
+# go mod tidy.
+# Perform these checks on each module inside gRPC.
+for MOD_FILE in $(find . -name 'go.mod'); do
+  MOD_DIR=$(dirname ${MOD_FILE})
+  pushd ${MOD_DIR}
+  go vet -all ./... | fail_on_output
+  gofmt -s -d -l . 2>&1 | fail_on_output
+  goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+  golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
+
+  go mod tidy -compat=1.17
+  git status --porcelain 2>&1 | fail_on_output || \
+    (git status; git --no-pager diff; exit 1)
+  popd
+done
+
+# - Collection of static analysis checks
+#
+# TODO(dfawley): don't use deprecated functions in examples or first-party
+# plugins.
+# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
+SC_OUT="$(mktemp)"
+staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
+# Error if anything other than deprecation warnings are printed.
+not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
+# Only ignore the following deprecated types/fields/functions.
+not grep -Fv '.CredsBundle
+.HeaderMap
+.Metadata is deprecated: use Attributes
+.NewAddress
+.NewServiceConfig
+.Type is deprecated: use Attributes
+BuildVersion is deprecated
+balancer.ErrTransientFailure
+balancer.Picker
+extDesc.Filename is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
+grpc.CallCustomCodec
+grpc.Code
+grpc.Compressor
+grpc.CustomCodec
+grpc.Decompressor
+grpc.MaxMsgSize
+grpc.MethodConfig
+grpc.NewGZIPCompressor
+grpc.NewGZIPDecompressor
+grpc.RPCCompressor
+grpc.RPCDecompressor
+grpc.ServiceConfig
+grpc.WithCompressor
+grpc.WithDecompressor
+grpc.WithDialer
+grpc.WithMaxMsgSize
+grpc.WithServiceConfig
+grpc.WithTimeout
+http.CloseNotifier
+info.SecurityVersion
+proto is deprecated
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated
+resolver.Backend
+resolver.GRPCLB
+Target is deprecated: Use the Target field in the BuildOptions instead.
+xxx_messageInfo_
+' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+  # Number of files in this go package.
+  fileCount=$(go list -f '{{len .GoFiles}}' $1)
+  if [ ${fileCount} -eq 0 ]; then
+    return 0
+  fi
+  # Number of package errors generated by golint.
+  lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+  # golint complains about every file that's missing the package comment. If the
+  # number of files for this package is greater than the number of errors, at
+  # least one file has a package comment, so the check passes. Otherwise, fail.
+  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+    echo "Package $1 (with ${fileCount} files) is missing package comment"
+    return 1
+  fi
+}
+lint_package_comment() {
+  set +ex
+
+  count=0
+  for i in $(go list ./...); do
+    lint_package_comment_per_package "$i"
+    ((count += $?))
+  done
+
+  set -ex
+  return $count
+}
+lint_package_comment
+
+echo SUCCESS
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
new file mode 100644
index 000000000..5f28148d8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -0,0 +1,665 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/pragma"
+	"google.golang.org/protobuf/internal/set"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Unmarshal reads the given []byte into the given proto.Message.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func Unmarshal(b []byte, m proto.Message) error {
+	return UnmarshalOptions{}.Unmarshal(b, m)
+}
+
+// UnmarshalOptions is a configurable JSON format parser.
+type UnmarshalOptions struct {
+	pragma.NoUnkeyedLiterals
+
+	// If AllowPartial is set, input for messages that will result in missing
+	// required fields will not return an error.
+	AllowPartial bool
+
+	// If DiscardUnknown is set, unknown fields are ignored.
+	DiscardUnknown bool
+
+	// Resolver is used for looking up types when unmarshaling
+	// google.protobuf.Any messages or extension fields.
+	// If nil, this defaults to using protoregistry.GlobalTypes.
+	Resolver interface {
+		protoregistry.MessageTypeResolver
+		protoregistry.ExtensionTypeResolver
+	}
+}
+
+// Unmarshal reads the given []byte and populates the given proto.Message
+// using options in the UnmarshalOptions object.
+// It will clear the message first before setting the fields.
+// If it returns an error, the given message may be partially set.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+	return o.unmarshal(b, m)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
+	proto.Reset(m)
+
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+
+	dec := decoder{json.NewDecoder(b), o}
+	if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
+		return err
+	}
+
+	// Check for EOF.
+	tok, err := dec.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.EOF {
+		return dec.unexpectedTokenError(tok)
+	}
+
+	if o.AllowPartial {
+		return nil
+	}
+	return proto.CheckInitialized(m)
+}
+
+type decoder struct {
+	*json.Decoder
+	opts UnmarshalOptions
+}
+
+// newError returns an error object with position info.
+func (d decoder) newError(pos int, f string, x ...interface{}) error {
+	line, column := d.Position(pos)
+	head := fmt.Sprintf("(line %d:%d): ", line, column)
+	return errors.New(head+f, x...)
+}
+
+// unexpectedTokenError returns a syntax error for the given unexpected token.
+func (d decoder) unexpectedTokenError(tok json.Token) error {
+	return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
+}
+
+// syntaxError returns a syntax error for the given position.
+func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+	line, column := d.Position(pos)
+	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
+	return errors.New(head+f, x...)
+}
+
+// unmarshalMessage unmarshals a message into the given protoreflect.Message.
+func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
+	if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
+		return unmarshal(d, m)
+	}
+
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	messageDesc := m.Descriptor()
+	if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+		return errors.New("no support for proto1 MessageSets")
+	}
+
+	var seenNums set.Ints
+	var seenOneofs set.Ints
+	fieldDescs := messageDesc.Fields()
+	for {
+		// Read field name.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		default:
+			return d.unexpectedTokenError(tok)
+		case json.ObjectClose:
+			return nil
+		case json.Name:
+			// Continue below.
+		}
+
+		name := tok.Name()
+		// When unmarshaling a non-custom embedded message in an Any, the JSON
+		// input contains the field "@type", which should be skipped: it is not
+		// a field of the embedded message, but an artifact of the Any format.
+		if skipTypeURL && name == "@type" {
+			d.Read()
+			continue
+		}
+
+		// Get the FieldDescriptor.
+		var fd protoreflect.FieldDescriptor
+		if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
+			// Only extension names are in [name] format.
+			extName := protoreflect.FullName(name[1 : len(name)-1])
+			extType, err := d.opts.Resolver.FindExtensionByName(extName)
+			if err != nil && err != protoregistry.NotFound {
+				return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
+			}
+			if extType != nil {
+				fd = extType.TypeDescriptor()
+				if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
+					return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
+				}
+			}
+		} else {
+			// The name can either be the JSON name or the proto field name.
+			fd = fieldDescs.ByJSONName(name)
+			if fd == nil {
+				fd = fieldDescs.ByTextName(name)
+			}
+		}
+		if flags.ProtoLegacy {
+			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
+				fd = nil // reset since the weak reference is not linked in
+			}
+		}
+
+		if fd == nil {
+			// Field is unknown.
+			if d.opts.DiscardUnknown {
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+				continue
+			}
+			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+		}
+
+		// Do not allow duplicate fields.
+		num := uint64(fd.Number())
+		if seenNums.Has(num) {
+			return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
+		}
+		seenNums.Set(num)
+
+		// No need to set values for JSON null unless the field type is
+		// google.protobuf.Value or google.protobuf.NullValue.
+		if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
+			d.Read()
+			continue
+		}
+
+		switch {
+		case fd.IsList():
+			list := m.Mutable(fd).List()
+			if err := d.unmarshalList(list, fd); err != nil {
+				return err
+			}
+		case fd.IsMap():
+			mmap := m.Mutable(fd).Map()
+			if err := d.unmarshalMap(mmap, fd); err != nil {
+				return err
+			}
+		default:
+			// If field is a oneof, check if it has already been set.
+			if od := fd.ContainingOneof(); od != nil {
+				idx := uint64(od.Index())
+				if seenOneofs.Has(idx) {
+					return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
+				}
+				seenOneofs.Set(idx)
+			}
+
+			// Required or optional fields.
+			if err := d.unmarshalSingular(m, fd); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+func isKnownValue(fd protoreflect.FieldDescriptor) bool {
+	md := fd.Message()
+	return md != nil && md.FullName() == genid.Value_message_fullname
+}
+
+func isNullValue(fd protoreflect.FieldDescriptor) bool {
+	ed := fd.Enum()
+	return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
+}
+
+// unmarshalSingular unmarshals to the non-repeated field specified
+// by the given FieldDescriptor.
+func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
+	var val protoreflect.Value
+	var err error
+	switch fd.Kind() {
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		val = m.NewField(fd)
+		err = d.unmarshalMessage(val.Message(), false)
+	default:
+		val, err = d.unmarshalScalar(fd)
+	}
+
+	if err != nil {
+		return err
+	}
+	m.Set(fd, val)
+	return nil
+}
+
+// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
+// the given FieldDescriptor.
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	const b32 int = 32
+	const b64 int = 64
+
+	tok, err := d.Read()
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+
+	kind := fd.Kind()
+	switch kind {
+	case protoreflect.BoolKind:
+		if tok.Kind() == json.Bool {
+			return protoreflect.ValueOfBool(tok.Bool()), nil
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		if v, ok := unmarshalInt(tok, b32); ok {
+			return v, nil
+		}
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		if v, ok := unmarshalInt(tok, b64); ok {
+			return v, nil
+		}
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		if v, ok := unmarshalUint(tok, b32); ok {
+			return v, nil
+		}
+
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		if v, ok := unmarshalUint(tok, b64); ok {
+			return v, nil
+		}
+
+	case protoreflect.FloatKind:
+		if v, ok := unmarshalFloat(tok, b32); ok {
+			return v, nil
+		}
+
+	case protoreflect.DoubleKind:
+		if v, ok := unmarshalFloat(tok, b64); ok {
+			return v, nil
+		}
+
+	case protoreflect.StringKind:
+		if tok.Kind() == json.String {
+			return protoreflect.ValueOfString(tok.ParsedString()), nil
+		}
+
+	case protoreflect.BytesKind:
+		if v, ok := unmarshalBytes(tok); ok {
+			return v, nil
+		}
+
+	case protoreflect.EnumKind:
+		if v, ok := unmarshalEnum(tok, fd); ok {
+			return v, nil
+		}
+
+	default:
+		panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
+	}
+
+	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+}
+
+func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.Number:
+		return getInt(tok, bitSize)
+
+	case json.String:
+		// Decode number from string.
+		s := strings.TrimSpace(tok.ParsedString())
+		if len(s) != len(tok.ParsedString()) {
+			return protoreflect.Value{}, false
+		}
+		dec := json.NewDecoder([]byte(s))
+		tok, err := dec.Read()
+		if err != nil {
+			return protoreflect.Value{}, false
+		}
+		return getInt(tok, bitSize)
+	}
+	return protoreflect.Value{}, false
+}
+
+func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	n, ok := tok.Int(bitSize)
+	if !ok {
+		return protoreflect.Value{}, false
+	}
+	if bitSize == 32 {
+		return protoreflect.ValueOfInt32(int32(n)), true
+	}
+	return protoreflect.ValueOfInt64(n), true
+}
+
+func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.Number:
+		return getUint(tok, bitSize)
+
+	case json.String:
+		// Decode number from string.
+		s := strings.TrimSpace(tok.ParsedString())
+		if len(s) != len(tok.ParsedString()) {
+			return protoreflect.Value{}, false
+		}
+		dec := json.NewDecoder([]byte(s))
+		tok, err := dec.Read()
+		if err != nil {
+			return protoreflect.Value{}, false
+		}
+		return getUint(tok, bitSize)
+	}
+	return protoreflect.Value{}, false
+}
+
+func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	n, ok := tok.Uint(bitSize)
+	if !ok {
+		return protoreflect.Value{}, false
+	}
+	if bitSize == 32 {
+		return protoreflect.ValueOfUint32(uint32(n)), true
+	}
+	return protoreflect.ValueOfUint64(n), true
+}
+
+func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.Number:
+		return getFloat(tok, bitSize)
+
+	case json.String:
+		s := tok.ParsedString()
+		switch s {
+		case "NaN":
+			if bitSize == 32 {
+				return protoreflect.ValueOfFloat32(float32(math.NaN())), true
+			}
+			return protoreflect.ValueOfFloat64(math.NaN()), true
+		case "Infinity":
+			if bitSize == 32 {
+				return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
+			}
+			return protoreflect.ValueOfFloat64(math.Inf(+1)), true
+		case "-Infinity":
+			if bitSize == 32 {
+				return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
+			}
+			return protoreflect.ValueOfFloat64(math.Inf(-1)), true
+		}
+
+		// Decode number from string.
+		if len(s) != len(strings.TrimSpace(s)) {
+			return protoreflect.Value{}, false
+		}
+		dec := json.NewDecoder([]byte(s))
+		tok, err := dec.Read()
+		if err != nil {
+			return protoreflect.Value{}, false
+		}
+		return getFloat(tok, bitSize)
+	}
+	return protoreflect.Value{}, false
+}
+
+func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	n, ok := tok.Float(bitSize)
+	if !ok {
+		return protoreflect.Value{}, false
+	}
+	if bitSize == 32 {
+		return protoreflect.ValueOfFloat32(float32(n)), true
+	}
+	return protoreflect.ValueOfFloat64(n), true
+}
+
+func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
+	if tok.Kind() != json.String {
+		return protoreflect.Value{}, false
+	}
+
+	s := tok.ParsedString()
+	enc := base64.StdEncoding
+	if strings.ContainsAny(s, "-_") {
+		enc = base64.URLEncoding
+	}
+	if len(s)%4 != 0 {
+		enc = enc.WithPadding(base64.NoPadding)
+	}
+	b, err := enc.DecodeString(s)
+	if err != nil {
+		return protoreflect.Value{}, false
+	}
+	return protoreflect.ValueOfBytes(b), true
+}
+
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.String:
+		// Lookup EnumNumber based on name.
+		s := tok.ParsedString()
+		if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
+			return protoreflect.ValueOfEnum(enumVal.Number()), true
+		}
+
+	case json.Number:
+		if n, ok := tok.Int(32); ok {
+			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
+		}
+
+	case json.Null:
+		// This is only valid for google.protobuf.NullValue.
+		if isNullValue(fd) {
+			return protoreflect.ValueOfEnum(0), true
+		}
+	}
+
+	return protoreflect.Value{}, false
+}
+
+func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ArrayOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	switch fd.Kind() {
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		for {
+			tok, err := d.Peek()
+			if err != nil {
+				return err
+			}
+
+			if tok.Kind() == json.ArrayClose {
+				d.Read()
+				return nil
+			}
+
+			val := list.NewElement()
+			if err := d.unmarshalMessage(val.Message(), false); err != nil {
+				return err
+			}
+			list.Append(val)
+		}
+	default:
+		for {
+			tok, err := d.Peek()
+			if err != nil {
+				return err
+			}
+
+			if tok.Kind() == json.ArrayClose {
+				d.Read()
+				return nil
+			}
+
+			val, err := d.unmarshalScalar(fd)
+			if err != nil {
+				return err
+			}
+			list.Append(val)
+		}
+	}
+
+	return nil
+}
+
+func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	// Determine ahead whether map entry is a scalar type or a message type in
+	// order to call the appropriate unmarshalMapValue func inside the for loop
+	// below.
+	var unmarshalMapValue func() (protoreflect.Value, error)
+	switch fd.MapValue().Kind() {
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		unmarshalMapValue = func() (protoreflect.Value, error) {
+			val := mmap.NewValue()
+			if err := d.unmarshalMessage(val.Message(), false); err != nil {
+				return protoreflect.Value{}, err
+			}
+			return val, nil
+		}
+	default:
+		unmarshalMapValue = func() (protoreflect.Value, error) {
+			return d.unmarshalScalar(fd.MapValue())
+		}
+	}
+
+Loop:
+	for {
+		// Read field name.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		default:
+			return d.unexpectedTokenError(tok)
+		case json.ObjectClose:
+			break Loop
+		case json.Name:
+			// Continue.
+		}
+
+		// Unmarshal field name.
+		pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
+		if err != nil {
+			return err
+		}
+
+		// Check for duplicate field name.
+		if mmap.Has(pkey) {
+			return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
+		}
+
+		// Read and unmarshal field value.
+		pval, err := unmarshalMapValue()
+		if err != nil {
+			return err
+		}
+
+		mmap.Set(pkey, pval)
+	}
+
+	return nil
+}
+
+// unmarshalMapKey converts the given token of Name kind into a protoreflect.MapKey.
+// A map key type is any integral, string, or bool type.
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
+	const b32 = 32
+	const b64 = 64
+	const base10 = 10
+
+	name := tok.Name()
+	kind := fd.Kind()
+	switch kind {
+	case protoreflect.StringKind:
+		return protoreflect.ValueOfString(name).MapKey(), nil
+
+	case protoreflect.BoolKind:
+		switch name {
+		case "true":
+			return protoreflect.ValueOfBool(true).MapKey(), nil
+		case "false":
+			return protoreflect.ValueOfBool(false).MapKey(), nil
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		if n, err := strconv.ParseInt(name, base10, b32); err == nil {
+			return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
+		}
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		if n, err := strconv.ParseInt(name, base10, b64); err == nil {
+			return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
+		}
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		if n, err := strconv.ParseUint(name, base10, b32); err == nil {
+			return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
+		}
+
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		if n, err := strconv.ParseUint(name, base10, b64); err == nil {
+			return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
+		}
+
+	default:
+		panic(fmt.Sprintf("invalid kind for map key: %v", kind))
+	}
+
+	return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
+}
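
The decoder above backs the public protojson.Unmarshal entry points. A
minimal sketch of typical usage, assuming only the public
google.golang.org/protobuf module and its structpb well-known wrapper
(neither is specific to this patch):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        // google.protobuf.Struct accepts arbitrary JSON objects, so it
        // exercises the object, list, and scalar paths of the decoder
        // without any generated message types.
        var s structpb.Struct
        in := []byte(`{"service": "gotosocial", "tracing": true}`)
        if err := protojson.Unmarshal(in, &s); err != nil {
            // Decode errors carry "(line L:C)" position prefixes, as
            // built by decoder.newError above.
            fmt.Println("unmarshal failed:", err)
            return
        }
        fmt.Println(s.Fields["service"].GetStringValue()) // gotosocial
    }
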
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
new file mode 100644
index 000000000..00ea2fecf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protojson marshals and unmarshals protocol buffer messages in the
+// JSON format. It follows the guide at
+// https://developers.google.com/protocol-buffers/docs/proto3#json.
+//
+// This package produces a different output than the standard "encoding/json"
+// package, which does not operate correctly on protocol buffer messages.
+package protojson
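
The difference from encoding/json noted in the package comment is easiest to
see with a well-known type: the standard library serializes the generated
struct fields, while protojson applies the canonical proto3 JSON mapping. A
sketch, assuming the public durationpb package (also not part of this patch):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        d := durationpb.New(1500 * time.Millisecond)

        std, _ := json.Marshal(d)     // plain struct-field encoding
        pj, _ := protojson.Marshal(d) // canonical proto3 JSON

        fmt.Println(string(std)) // {"seconds":1,"nanos":500000000}
        fmt.Println(string(pj))  // "1.5s"
    }
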
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
new file mode 100644
index 000000000..d09d22e13
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -0,0 +1,343 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"encoding/base64"
+	"fmt"
+
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/filedesc"
+	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/order"
+	"google.golang.org/protobuf/internal/pragma"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const defaultIndent = "  "
+
+// Format formats the message as a multiline string.
+// This function is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Format(m proto.Message) string {
+	return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given proto.Message in JSON format using default options.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Marshal(m proto.Message) ([]byte, error) {
+	return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable JSON format marshaler.
+type MarshalOptions struct {
+	pragma.NoUnkeyedLiterals
+
+	// Multiline specifies whether the marshaler should format the output in
+	// indented-form with every textual element on a new line.
+	// If Indent is an empty string, then an arbitrary indent is chosen.
+	Multiline bool
+
+	// Indent specifies the set of indentation characters to use in a multiline
+	// formatted output such that every entry is preceded by Indent and
+	// terminated by a newline. If non-empty, then Multiline is treated as true.
+	// Indent can only be composed of space or tab characters.
+	Indent string
+
+	// AllowPartial allows messages that have missing required fields to marshal
+	// without returning an error. If AllowPartial is false (the default),
+	// Marshal will return error if there are any missing required fields.
+	AllowPartial bool
+
+	// UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
+	// field names.
+	UseProtoNames bool
+
+	// UseEnumNumbers emits enum values as numbers.
+	UseEnumNumbers bool
+
+	// EmitUnpopulated specifies whether to emit unpopulated fields. It does not
+	// emit unpopulated oneof fields or unpopulated extension fields.
+	// The JSON values emitted for unpopulated fields are as follows:
+	//  ╔═══════╤════════════════════════════╗
+	//  ║ JSON  │ Protobuf field             ║
+	//  ╠═══════╪════════════════════════════╣
+	//  ║ false │ proto3 boolean fields      ║
+	//  ║ 0     │ proto3 numeric fields      ║
+	//  ║ ""    │ proto3 string/bytes fields ║
+	//  ║ null  │ proto2 scalar fields       ║
+	//  ║ null  │ message fields             ║
+	//  ║ []    │ list fields                ║
+	//  ║ {}    │ map fields                 ║
+	//  ╚═══════╧════════════════════════════╝
+	EmitUnpopulated bool
+
+	// Resolver is used for looking up types when expanding google.protobuf.Any
+	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
+	Resolver interface {
+		protoregistry.ExtensionTypeResolver
+		protoregistry.MessageTypeResolver
+	}
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func (o MarshalOptions) Format(m proto.Message) string {
+	if m == nil || !m.ProtoReflect().IsValid() {
+		return "<nil>" // invalid syntax, but okay since this is for debugging
+	}
+	o.AllowPartial = true
+	b, _ := o.Marshal(m)
+	return string(b)
+}
+
+// Marshal marshals the given proto.Message in the JSON format using options in
+// MarshalOptions. Do not depend on the output being stable. It may change over
+// time across different versions of the program.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+	return o.marshal(m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) {
+	if o.Multiline && o.Indent == "" {
+		o.Indent = defaultIndent
+	}
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+
+	internalEnc, err := json.NewEncoder(o.Indent)
+	if err != nil {
+		return nil, err
+	}
+
+	// Treat a nil message interface as an empty message,
+	// in which case the output is an empty JSON object.
+	if m == nil {
+		return []byte("{}"), nil
+	}
+
+	enc := encoder{internalEnc, o}
+	if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+		return nil, err
+	}
+	if o.AllowPartial {
+		return enc.Bytes(), nil
+	}
+	return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+	*json.Encoder
+	opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+	var fd filedesc.Field
+	fd.L0.FullName = "@type"
+	fd.L0.Index = -1
+	fd.L1.Cardinality = protoreflect.Optional
+	fd.L1.Kind = protoreflect.StringKind
+	return &fd
+}()
+
+// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+	order.FieldRanger
+	typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
+		return
+	}
+	m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
+type unpopulatedFieldRanger struct{ protoreflect.Message }
+
+func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	fds := m.Descriptor().Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if m.Has(fd) || fd.ContainingOneof() != nil {
+			continue // ignore populated fields and fields within a oneof
+		}
+
+		v := m.Get(fd)
+		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
+		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
+		if isProto2Scalar || isSingularMessage {
+			v = protoreflect.Value{} // use invalid value to emit null
+		}
+		if !f(fd, v) {
+			return
+		}
+	}
+	m.Message.Range(f)
+}
+
+// marshalMessage marshals the fields in the given protoreflect.Message.
+// If the typeURL is non-empty, then a synthetic "@type" field is injected
+// containing the URL as the value.
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
+	if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
+		return errors.New("no support for proto1 MessageSets")
+	}
+
+	if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
+		return marshal(e, m)
+	}
+
+	e.StartObject()
+	defer e.EndObject()
+
+	var fields order.FieldRanger = m
+	if e.opts.EmitUnpopulated {
+		fields = unpopulatedFieldRanger{m}
+	}
+	if typeURL != "" {
+		fields = typeURLFieldRanger{fields, typeURL}
+	}
+
+	var err error
+	order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		name := fd.JSONName()
+		if e.opts.UseProtoNames {
+			name = fd.TextName()
+		}
+
+		if err = e.WriteName(name); err != nil {
+			return false
+		}
+		if err = e.marshalValue(v, fd); err != nil {
+			return false
+		}
+		return true
+	})
+	return err
+}
+
+// marshalValue marshals the given protoreflect.Value.
+func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	switch {
+	case fd.IsList():
+		return e.marshalList(val.List(), fd)
+	case fd.IsMap():
+		return e.marshalMap(val.Map(), fd)
+	default:
+		return e.marshalSingular(val, fd)
+	}
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	if !val.IsValid() {
+		e.WriteNull()
+		return nil
+	}
+
+	switch kind := fd.Kind(); kind {
+	case protoreflect.BoolKind:
+		e.WriteBool(val.Bool())
+
+	case protoreflect.StringKind:
+		if e.WriteString(val.String()) != nil {
+			return errors.InvalidUTF8(string(fd.FullName()))
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		e.WriteInt(val.Int())
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		e.WriteUint(val.Uint())
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
+		protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
+		// 64-bit integers are written out as JSON string.
+		e.WriteString(val.String())
+
+	case protoreflect.FloatKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+		e.WriteFloat(val.Float(), 32)
+
+	case protoreflect.DoubleKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+		e.WriteFloat(val.Float(), 64)
+
+	case protoreflect.BytesKind:
+		e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
+
+	case protoreflect.EnumKind:
+		if fd.Enum().FullName() == genid.NullValue_enum_fullname {
+			e.WriteNull()
+		} else {
+			desc := fd.Enum().Values().ByNumber(val.Enum())
+			if e.opts.UseEnumNumbers || desc == nil {
+				e.WriteInt(int64(val.Enum()))
+			} else {
+				e.WriteString(string(desc.Name()))
+			}
+		}
+
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		if err := e.marshalMessage(val.Message(), ""); err != nil {
+			return err
+		}
+
+	default:
+		panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+	}
+	return nil
+}
+
+// marshalList marshals the given protoreflect.List.
+func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
+	e.StartArray()
+	defer e.EndArray()
+
+	for i := 0; i < list.Len(); i++ {
+		item := list.Get(i)
+		if err := e.marshalSingular(item, fd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// marshalMap marshals the given protoreflect.Map.
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
+	e.StartObject()
+	defer e.EndObject()
+
+	var err error
+	order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
+		if err = e.WriteName(k.String()); err != nil {
+			return false
+		}
+		if err = e.marshalSingular(v, fd.MapValue()); err != nil {
+			return false
+		}
+		return true
+	})
+	return err
+}
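
The MarshalOptions defined above are the write-side counterpart. A short
sketch of the indentation knobs, again using structpb for illustration (per
the doc comments above, the exact byte output is intentionally not stable
across versions):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
        s, err := structpb.NewStruct(map[string]interface{}{
            "endpoint": "localhost:4317",
            "insecure": true,
        })
        if err != nil {
            panic(err)
        }

        out, err := protojson.MarshalOptions{
            Multiline: true, // a non-empty Indent alone also implies multiline
            Indent:    "  ",
        }.Marshal(s)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }
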
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
new file mode 100644
index 000000000..c85f84694
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -0,0 +1,889 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/strs"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type marshalFunc func(encoder, protoreflect.Message) error
+
+// wellKnownTypeMarshaler returns a marshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
+	if name.Parent() == genid.GoogleProtobuf_package {
+		switch name.Name() {
+		case genid.Any_message_name:
+			return encoder.marshalAny
+		case genid.Timestamp_message_name:
+			return encoder.marshalTimestamp
+		case genid.Duration_message_name:
+			return encoder.marshalDuration
+		case genid.BoolValue_message_name,
+			genid.Int32Value_message_name,
+			genid.Int64Value_message_name,
+			genid.UInt32Value_message_name,
+			genid.UInt64Value_message_name,
+			genid.FloatValue_message_name,
+			genid.DoubleValue_message_name,
+			genid.StringValue_message_name,
+			genid.BytesValue_message_name:
+			return encoder.marshalWrapperType
+		case genid.Struct_message_name:
+			return encoder.marshalStruct
+		case genid.ListValue_message_name:
+			return encoder.marshalListValue
+		case genid.Value_message_name:
+			return encoder.marshalKnownValue
+		case genid.FieldMask_message_name:
+			return encoder.marshalFieldMask
+		case genid.Empty_message_name:
+			return encoder.marshalEmpty
+		}
+	}
+	return nil
+}
+
+type unmarshalFunc func(decoder, protoreflect.Message) error
+
+// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
+	if name.Parent() == genid.GoogleProtobuf_package {
+		switch name.Name() {
+		case genid.Any_message_name:
+			return decoder.unmarshalAny
+		case genid.Timestamp_message_name:
+			return decoder.unmarshalTimestamp
+		case genid.Duration_message_name:
+			return decoder.unmarshalDuration
+		case genid.BoolValue_message_name,
+			genid.Int32Value_message_name,
+			genid.Int64Value_message_name,
+			genid.UInt32Value_message_name,
+			genid.UInt64Value_message_name,
+			genid.FloatValue_message_name,
+			genid.DoubleValue_message_name,
+			genid.StringValue_message_name,
+			genid.BytesValue_message_name:
+			return decoder.unmarshalWrapperType
+		case genid.Struct_message_name:
+			return decoder.unmarshalStruct
+		case genid.ListValue_message_name:
+			return decoder.unmarshalListValue
+		case genid.Value_message_name:
+			return decoder.unmarshalKnownValue
+		case genid.FieldMask_message_name:
+			return decoder.unmarshalFieldMask
+		case genid.Empty_message_name:
+			return decoder.unmarshalEmpty
+		}
+	}
+	return nil
+}
+
+// The JSON representation of an Any message uses the regular representation of
+// the deserialized, embedded message, with an additional field `@type` which
+// contains the type URL. If the embedded message type is well-known and has a
+// custom JSON representation, that representation will be embedded by adding a
+// field `value` which holds the custom JSON in addition to the `@type` field.
+
+func (e encoder) marshalAny(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+	fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+	if !m.Has(fdType) {
+		if !m.Has(fdValue) {
+			// If the message is empty, marshal out an empty JSON object.
+			e.StartObject()
+			e.EndObject()
+			return nil
+		} else {
+			// Return error if type_url field is not set, but value is set.
+			return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
+		}
+	}
+
+	typeVal := m.Get(fdType)
+	valueVal := m.Get(fdValue)
+
+	// Resolve the type in order to unmarshal value field.
+	typeURL := typeVal.String()
+	emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+	if err != nil {
+		return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
+	}
+
+	em := emt.New()
+	err = proto.UnmarshalOptions{
+		AllowPartial: true, // never check required fields inside an Any
+		Resolver:     e.opts.Resolver,
+	}.Unmarshal(valueVal.Bytes(), em.Interface())
+	if err != nil {
+		return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
+	}
+
+	// If type of value has custom JSON encoding, marshal out a field "value"
+	// with corresponding custom JSON encoding of the embedded message as a
+	// field.
+	if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
+		e.StartObject()
+		defer e.EndObject()
+
+		// Marshal out @type field.
+		e.WriteName("@type")
+		if err := e.WriteString(typeURL); err != nil {
+			return err
+		}
+
+		e.WriteName("value")
+		return marshal(e, em)
+	}
+
+	// Else, marshal out the embedded message's fields in this Any object.
+	if err := e.marshalMessage(em, typeURL); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d decoder) unmarshalAny(m protoreflect.Message) error {
+	// Peek to check for json.ObjectOpen to avoid advancing a read.
+	start, err := d.Peek()
+	if err != nil {
+		return err
+	}
+	if start.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(start)
+	}
+
+	// Use another decoder to parse the unread bytes for the @type field. This
+	// avoids advancing a read on the current decoder because the current JSON
+	// object may contain the fields of the embedded type.
+	dec := decoder{d.Clone(), UnmarshalOptions{}}
+	tok, err := findTypeURL(dec)
+	switch err {
+	case errEmptyObject:
+		// An empty JSON object translates to an empty Any message.
+		d.Read() // Read json.ObjectOpen.
+		d.Read() // Read json.ObjectClose.
+		return nil
+
+	case errMissingType:
+		if d.opts.DiscardUnknown {
+			// Treat all fields as unknowns, similar to an empty object.
+			return d.skipJSONValue()
+		}
+		// Use start.Pos() for line position.
+		return d.newError(start.Pos(), err.Error())
+
+	default:
+		if err != nil {
+			return err
+		}
+	}
+
+	typeURL := tok.ParsedString()
+	emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+	if err != nil {
+		return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
+	}
+
+	// Create new message for the embedded message type and unmarshal into it.
+	em := emt.New()
+	if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
+		// If embedded message is a custom type,
+		// unmarshal the JSON "value" field into it.
+		if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
+			return err
+		}
+	} else {
+		// Else unmarshal the current JSON object into it.
+		if err := d.unmarshalMessage(em, true); err != nil {
+			return err
+		}
+	}
+	// Serialize the embedded message and assign the resulting bytes to the
+	// proto value field.
+	b, err := proto.MarshalOptions{
+		AllowPartial:  true, // No need to check required fields inside an Any.
+		Deterministic: true,
+	}.Marshal(em.Interface())
+	if err != nil {
+		return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
+	}
+
+	fds := m.Descriptor().Fields()
+	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+	fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+	m.Set(fdType, protoreflect.ValueOfString(typeURL))
+	m.Set(fdValue, protoreflect.ValueOfBytes(b))
+	return nil
+}
+
+var errEmptyObject = fmt.Errorf(`empty object`)
+var errMissingType = fmt.Errorf(`missing "@type" field`)
+
+// findTypeURL returns the token for the "@type" field value from the given
+// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
+// It returns errEmptyObject if the JSON object is empty, or errMissingType if
+// the @type field does not exist. It returns an error if the @type field is
+// not valid or if other decoding issues occur.
+func findTypeURL(d decoder) (json.Token, error) {
+	var typeURL string
+	var typeTok json.Token
+	numFields := 0
+	// Skip start object.
+	d.Read()
+
+Loop:
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return json.Token{}, err
+		}
+
+		switch tok.Kind() {
+		case json.ObjectClose:
+			if typeURL == "" {
+				// Did not find @type field.
+				if numFields > 0 {
+					return json.Token{}, errMissingType
+				}
+				return json.Token{}, errEmptyObject
+			}
+			break Loop
+
+		case json.Name:
+			numFields++
+			if tok.Name() != "@type" {
+				// Skip value.
+				if err := d.skipJSONValue(); err != nil {
+					return json.Token{}, err
+				}
+				continue
+			}
+
+			// Return error if this was previously set already.
+			if typeURL != "" {
+				return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
+			}
+			// Read field value.
+			tok, err := d.Read()
+			if err != nil {
+				return json.Token{}, err
+			}
+			if tok.Kind() != json.String {
+				return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
+			}
+			typeURL = tok.ParsedString()
+			if typeURL == "" {
+				return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
+			}
+			typeTok = tok
+		}
+	}
+
+	return typeTok, nil
+}
+
+// skipJSONValue parses a JSON value (null, boolean, string, number, object and
+// array) in order to advance the read to the next JSON value. It relies on
+// the decoder returning an error if the types are not in valid sequence.
+func (d decoder) skipJSONValue() error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	// Only need to continue reading for objects and arrays.
+	switch tok.Kind() {
+	case json.ObjectOpen:
+		for {
+			tok, err := d.Read()
+			if err != nil {
+				return err
+			}
+			switch tok.Kind() {
+			case json.ObjectClose:
+				return nil
+			case json.Name:
+				// Skip object field value.
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+			}
+		}
+
+	case json.ArrayOpen:
+		for {
+			tok, err := d.Peek()
+			if err != nil {
+				return err
+			}
+			switch tok.Kind() {
+			case json.ArrayClose:
+				d.Read()
+				return nil
+			default:
+				// Skip array item.
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// unmarshalAnyValue unmarshals the given custom-type message from the JSON
+// object's "value" field.
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
+	// Skip ObjectOpen, and start reading the fields.
+	d.Read()
+
+	var found bool // Used for detecting duplicate "value".
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose:
+			if !found {
+				return d.newError(tok.Pos(), `missing "value" field`)
+			}
+			return nil
+
+		case json.Name:
+			switch tok.Name() {
+			case "@type":
+				// Skip the value as this was previously parsed already.
+				d.Read()
+
+			case "value":
+				if found {
+					return d.newError(tok.Pos(), `duplicate "value" field`)
+				}
+				// Unmarshal the field value into the given message.
+				if err := unmarshal(d, m); err != nil {
+					return err
+				}
+				found = true
+
+			default:
+				if d.opts.DiscardUnknown {
+					if err := d.skipJSONValue(); err != nil {
+						return err
+					}
+					continue
+				}
+				return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+			}
+		}
+	}
+}
+
+// Wrapper types are encoded as JSON primitives like string, number or boolean.
+
+func (e encoder) marshalWrapperType(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+	val := m.Get(fd)
+	return e.marshalSingular(val, fd)
+}
+
+func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+	val, err := d.unmarshalScalar(fd)
+	if err != nil {
+		return err
+	}
+	m.Set(fd, val)
+	return nil
+}
+
+// The JSON representation for Empty is an empty JSON object.
+
+func (e encoder) marshalEmpty(protoreflect.Message) error {
+	e.StartObject()
+	e.EndObject()
+	return nil
+}
+
+func (d decoder) unmarshalEmpty(protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose:
+			return nil
+
+		case json.Name:
+			if d.opts.DiscardUnknown {
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+				continue
+			}
+			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+
+		default:
+			return d.unexpectedTokenError(tok)
+		}
+	}
+}
+
+// The JSON representation for Struct is a JSON object that contains the encoded
+// Struct.fields map and follows the serialization rules for a map.
+
+func (e encoder) marshalStruct(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+	return e.marshalMap(m.Get(fd).Map(), fd)
+}
+
+func (d decoder) unmarshalStruct(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+	return d.unmarshalMap(m.Mutable(fd).Map(), fd)
+}
+
+// The JSON representation for ListValue is JSON array that contains the encoded
+// ListValue.values repeated field and follows the serialization rules for a
+// repeated field.
+
+func (e encoder) marshalListValue(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+	return e.marshalList(m.Get(fd).List(), fd)
+}
+
+func (d decoder) unmarshalListValue(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+	return d.unmarshalList(m.Mutable(fd).List(), fd)
+}
+
+// The JSON representation for a Value depends on the oneof field that is set.
+// Each of the fields in the oneof has its own custom serialization rule. A
+// Value message needs to have one oneof field set, else it is an error.
+
+func (e encoder) marshalKnownValue(m protoreflect.Message) error {
+	od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
+	fd := m.WhichOneof(od)
+	if fd == nil {
+		return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
+	}
+	if fd.Number() == genid.Value_NumberValue_field_number {
+		if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
+			return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
+		}
+	}
+	return e.marshalSingular(m.Get(fd), fd)
+}
+
+func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
+	tok, err := d.Peek()
+	if err != nil {
+		return err
+	}
+
+	var fd protoreflect.FieldDescriptor
+	var val protoreflect.Value
+	switch tok.Kind() {
+	case json.Null:
+		d.Read()
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
+		val = protoreflect.ValueOfEnum(0)
+
+	case json.Bool:
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
+		val = protoreflect.ValueOfBool(tok.Bool())
+
+	case json.Number:
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
+		var ok bool
+		val, ok = unmarshalFloat(tok, 64)
+		if !ok {
+			return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+		}
+
+	case json.String:
+		// A JSON string may have been encoded from the number_value field,
+		// e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
+		// for it to be in JSON string form. Given this custom encoding spec,
+		// however, there is no way to identify that and hence a JSON string is
+		// always assigned to the string_value field, which means that certain
+		// encodings cannot be parsed back to the same field.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
+		val = protoreflect.ValueOfString(tok.ParsedString())
+
+	case json.ObjectOpen:
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
+		val = m.NewField(fd)
+		if err := d.unmarshalStruct(val.Message()); err != nil {
+			return err
+		}
+
+	case json.ArrayOpen:
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
+		val = m.NewField(fd)
+		if err := d.unmarshalListValue(val.Message()); err != nil {
+			return err
+		}
+
+	default:
+		return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+	}
+
+	m.Set(fd, val)
+	return nil
+}
+
+// The JSON representation for a Duration is a JSON string that ends in the
+// suffix "s" (indicating seconds) and is preceded by the number of seconds,
+// with nanoseconds expressed as fractional seconds.
+//
+// Durations less than one second are represented with a 0 seconds field and a
+// positive or negative nanos field. For durations of one second or more, a
+// non-zero value for the nanos field must be of the same sign as the seconds
+// field.
+//
+// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
+// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
+
+const (
+	secondsInNanos       = 999999999
+	maxSecondsInDuration = 315576000000
+)
+
+func (e encoder) marshalDuration(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
+
+	secsVal := m.Get(fdSeconds)
+	nanosVal := m.Get(fdNanos)
+	secs := secsVal.Int()
+	nanos := nanosVal.Int()
+	if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
+		return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
+	}
+	if nanos < -secondsInNanos || nanos > secondsInNanos {
+		return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
+	}
+	if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
+		return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
+	}
+	// Generated output always contains 0, 3, 6, or 9 fractional digits,
+	// depending on required precision, followed by the suffix "s".
+	var sign string
+	if secs < 0 || nanos < 0 {
+		sign, secs, nanos = "-", -1*secs, -1*nanos
+	}
+	x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, ".000")
+	e.WriteString(x + "s")
+	return nil
+}
+
+func (d decoder) unmarshalDuration(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+
+	secs, nanos, ok := parseDuration(tok.ParsedString())
+	if !ok {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
+	}
+	// Validate seconds. No need to validate nanos because parseDuration would
+	// have covered that already.
+	if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
+		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
+	}
+
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
+
+	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+	m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
+	return nil
+}
+
+// parseDuration parses the given input string for the seconds and nanoseconds
+// values of the Duration JSON format. The format is a decimal number with the
+// suffix 's'. It can have an optional plus/minus sign. There needs to be at
+// least an integer or fractional part. The fractional part is limited to 9
+// digits for nanosecond precision, regardless of trailing zero digits.
+// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
+func parseDuration(input string) (int64, int32, bool) {
+	b := []byte(input)
+	size := len(b)
+	if size < 2 {
+		return 0, 0, false
+	}
+	if b[size-1] != 's' {
+		return 0, 0, false
+	}
+	b = b[:size-1]
+
+	// Read optional plus/minus symbol.
+	var neg bool
+	switch b[0] {
+	case '-':
+		neg = true
+		b = b[1:]
+	case '+':
+		b = b[1:]
+	}
+	if len(b) == 0 {
+		return 0, 0, false
+	}
+
+	// Read the integer part.
+	var intp []byte
+	switch {
+	case b[0] == '0':
+		b = b[1:]
+
+	case '1' <= b[0] && b[0] <= '9':
+		intp = b[0:]
+		b = b[1:]
+		n := 1
+		for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
+			n++
+			b = b[1:]
+		}
+		intp = intp[:n]
+
+	case b[0] == '.':
+		// Continue below.
+
+	default:
+		return 0, 0, false
+	}
+
+	hasFrac := false
+	var frac [9]byte
+	if len(b) > 0 {
+		if b[0] != '.' {
+			return 0, 0, false
+		}
+		// Read the fractional part.
+		b = b[1:]
+		n := 0
+		for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
+			frac[n] = b[0]
+			n++
+			b = b[1:]
+		}
+		// It is not valid if there are more bytes left.
+		if len(b) > 0 {
+			return 0, 0, false
+		}
+		// Pad fractional part with 0s.
+		for i := n; i < 9; i++ {
+			frac[i] = '0'
+		}
+		hasFrac = true
+	}
+
+	var secs int64
+	if len(intp) > 0 {
+		var err error
+		secs, err = strconv.ParseInt(string(intp), 10, 64)
+		if err != nil {
+			return 0, 0, false
+		}
+	}
+
+	var nanos int64
+	if hasFrac {
+		nanob := bytes.TrimLeft(frac[:], "0")
+		if len(nanob) > 0 {
+			var err error
+			nanos, err = strconv.ParseInt(string(nanob), 10, 32)
+			if err != nil {
+				return 0, 0, false
+			}
+		}
+	}
+
+	if neg {
+		if secs > 0 {
+			secs = -secs
+		}
+		if nanos > 0 {
+			nanos = -nanos
+		}
+	}
+	return secs, int32(nanos), true
+}
+
+// The JSON representation for a Timestamp is a JSON string in the RFC 3339
+// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
+// {year} is always expressed using four digits while {month}, {day}, {hour},
+// {min}, and {sec} are zero-padded to two digits each. The fractional-seconds
+// part, which can go up to 9 digits (1 nanosecond resolution), is optional. The
+// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
+// should always use UTC (as indicated by "Z") and a decoder should be able to
+// accept both UTC and other timezones (as indicated by an offset).
+//
+// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
+// inclusive.
+// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
+
+const (
+	maxTimestampSeconds = 253402300799
+	minTimestampSeconds = -62135596800
+)
+
+func (e encoder) marshalTimestamp(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+	secsVal := m.Get(fdSeconds)
+	nanosVal := m.Get(fdNanos)
+	secs := secsVal.Int()
+	nanos := nanosVal.Int()
+	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+		return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
+	}
+	if nanos < 0 || nanos > secondsInNanos {
+		return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
+	}
+	// Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
+	// 6 or 9 fractional digits.
+	t := time.Unix(secs, nanos).UTC()
+	x := t.Format("2006-01-02T15:04:05.000000000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, ".000")
+	e.WriteString(x + "Z")
+	return nil
+}
+
+func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+
+	t, err := time.Parse(time.RFC3339Nano, tok.ParsedString())
+	if err != nil {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+	// Validate seconds. No need to validate nanos because time.Parse would have
+	// covered that already.
+	secs := t.Unix()
+	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+	m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
+	return nil
+}
+
+// The JSON representation for a FieldMask is a JSON string where paths are
+// separated by a comma. Field names in each path are converted to/from
+// lower-camel naming conventions. Encoding should fail if the path name would
+// end up differently after a round-trip.
+
+func (e encoder) marshalFieldMask(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+	list := m.Get(fd).List()
+	paths := make([]string, 0, list.Len())
+
+	for i := 0; i < list.Len(); i++ {
+		s := list.Get(i).String()
+		if !protoreflect.FullName(s).IsValid() {
+			return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
+		}
+		// Return error if conversion to camelCase is not reversible.
+		cc := strs.JSONCamelCase(s)
+		if s != strs.JSONSnakeCase(cc) {
+			return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
+		}
+		paths = append(paths, cc)
+	}
+
+	e.WriteString(strings.Join(paths, ","))
+	return nil
+}
+
+func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+	str := strings.TrimSpace(tok.ParsedString())
+	if str == "" {
+		return nil
+	}
+	paths := strings.Split(str, ",")
+
+	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+	list := m.Mutable(fd).List()
+
+	for _, s0 := range paths {
+		s := strs.JSONSnakeCase(s0)
+		if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
+			return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
+		}
+		list.Append(protoreflect.ValueOfString(s))
+	}
+	return nil
+}
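+
+// For illustration (hypothetical values, not from the upstream tests): a
+// FieldMask with paths ["foo_bar", "baz.qux_quux"] marshals to the JSON
+// string "fooBar,baz.quxQuux", and unmarshaling that string restores the
+// snake_case paths. An input containing "_" (e.g. "foo_bar") is rejected by
+// the decoder above because the round-trip would not be reversible.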
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
new file mode 100644
index 000000000..b13fd29e8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -0,0 +1,340 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/errors"
+)
+
+// call specifies which Decoder method was invoked.
+type call uint8
+
+const (
+	readCall call = iota
+	peekCall
+)
+
+const unexpectedFmt = "unexpected token %s"
+
+// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
+var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
+
+// Decoder is a token-based JSON decoder.
+type Decoder struct {
+	// lastCall is last method called, either readCall or peekCall.
+	// Initial value is readCall.
+	lastCall call
+
+	// lastToken contains the last read token.
+	lastToken Token
+
+	// lastErr contains the last read error.
+	lastErr error
+
+	// openStack is a stack containing ObjectOpen and ArrayOpen values. The
+	// top of stack represents the object or the array the current value is
+	// directly located in.
+	openStack []Kind
+
+	// orig is used in reporting line and column.
+	orig []byte
+	// in contains the unconsumed input.
+	in []byte
+}
+
+// NewDecoder returns a Decoder to read the given []byte.
+func NewDecoder(b []byte) *Decoder {
+	return &Decoder{orig: b, in: b}
+}
+
+// Peek looks ahead and returns the next token without advancing the read.
+func (d *Decoder) Peek() (Token, error) {
+	defer func() { d.lastCall = peekCall }()
+	if d.lastCall == readCall {
+		d.lastToken, d.lastErr = d.Read()
+	}
+	return d.lastToken, d.lastErr
+}
+
+// Read returns the next JSON token.
+// It will return an error if there is no valid token.
+func (d *Decoder) Read() (Token, error) {
+	const scalar = Null | Bool | Number | String
+
+	defer func() { d.lastCall = readCall }()
+	if d.lastCall == peekCall {
+		return d.lastToken, d.lastErr
+	}
+
+	tok, err := d.parseNext()
+	if err != nil {
+		return Token{}, err
+	}
+
+	switch tok.kind {
+	case EOF:
+		if len(d.openStack) != 0 ||
+			d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 {
+			return Token{}, ErrUnexpectedEOF
+		}
+
+	case Null:
+		if !d.isValueNext() {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+
+	case Bool, Number:
+		if !d.isValueNext() {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+
+	case String:
+		if d.isValueNext() {
+			break
+		}
+		// This string token should only be for a field name.
+		if d.lastToken.kind&(ObjectOpen|comma) == 0 {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		if len(d.in) == 0 {
+			return Token{}, ErrUnexpectedEOF
+		}
+		if c := d.in[0]; c != ':' {
+			return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
+		}
+		tok.kind = Name
+		d.consume(1)
+
+	case ObjectOpen, ArrayOpen:
+		if !d.isValueNext() {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		d.openStack = append(d.openStack, tok.kind)
+
+	case ObjectClose:
+		if len(d.openStack) == 0 ||
+			d.lastToken.kind == comma ||
+			d.openStack[len(d.openStack)-1] != ObjectOpen {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		d.openStack = d.openStack[:len(d.openStack)-1]
+
+	case ArrayClose:
+		if len(d.openStack) == 0 ||
+			d.lastToken.kind == comma ||
+			d.openStack[len(d.openStack)-1] != ArrayOpen {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		d.openStack = d.openStack[:len(d.openStack)-1]
+
+	case comma:
+		if len(d.openStack) == 0 ||
+			d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+	}
+
+	// Update d.lastToken only after validating token to be in the right sequence.
+	d.lastToken = tok
+
+	if d.lastToken.kind == comma {
+		return d.Read()
+	}
+	return tok, nil
+}
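+
+// For illustration (not part of the upstream code): reading the input
+// {"a": [1, true]} token by token produces the sequence
+//
+//	ObjectOpen, Name("a"), ArrayOpen, Number(1), Bool(true), ArrayClose,
+//	ObjectClose, EOF
+//
+// Commas never surface to the caller: Read validates them for sequencing and
+// then recursively reads the token that follows.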
+
+// Any sequence that looks like a non-delimiter (for error reporting).
+var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
+
+// parseNext parses for the next JSON token. It returns a Token object for
+// different types, except for Name. It does not handle whether the next token
+// is in a valid sequence or not.
+func (d *Decoder) parseNext() (Token, error) {
+	// Trim leading spaces.
+	d.consume(0)
+
+	in := d.in
+	if len(in) == 0 {
+		return d.consumeToken(EOF, 0), nil
+	}
+
+	switch in[0] {
+	case 'n':
+		if n := matchWithDelim("null", in); n != 0 {
+			return d.consumeToken(Null, n), nil
+		}
+
+	case 't':
+		if n := matchWithDelim("true", in); n != 0 {
+			return d.consumeBoolToken(true, n), nil
+		}
+
+	case 'f':
+		if n := matchWithDelim("false", in); n != 0 {
+			return d.consumeBoolToken(false, n), nil
+		}
+
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		if n, ok := parseNumber(in); ok {
+			return d.consumeToken(Number, n), nil
+		}
+
+	case '"':
+		s, n, err := d.parseString(in)
+		if err != nil {
+			return Token{}, err
+		}
+		return d.consumeStringToken(s, n), nil
+
+	case '{':
+		return d.consumeToken(ObjectOpen, 1), nil
+
+	case '}':
+		return d.consumeToken(ObjectClose, 1), nil
+
+	case '[':
+		return d.consumeToken(ArrayOpen, 1), nil
+
+	case ']':
+		return d.consumeToken(ArrayClose, 1), nil
+
+	case ',':
+		return d.consumeToken(comma, 1), nil
+	}
+	return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
+}
+
+// newSyntaxError returns an error with line and column information useful for
+// syntax errors.
+func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
+	e := errors.New(f, x...)
+	line, column := d.Position(pos)
+	return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns the line and column number of the given index in the
+// original input. It will panic if the index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+	b := d.orig[:idx]
+	line = bytes.Count(b, []byte("\n")) + 1
+	if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+		b = b[i+1:]
+	}
+	column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+	return line, column
+}
+
+// currPos returns the current index position of d.in from d.orig.
+func (d *Decoder) currPos() int {
+	return len(d.orig) - len(d.in)
+}
+
+// matchWithDelim matches s with the input b and verifies that the match
+// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
+// As a special case, EOF is considered a delimiter. It returns the length of s
+// if there is a match, else 0.
+func matchWithDelim(s string, b []byte) int {
+	if !bytes.HasPrefix(b, []byte(s)) {
+		return 0
+	}
+
+	n := len(s)
+	if n < len(b) && isNotDelim(b[n]) {
+		return 0
+	}
+	return n
+}
+
+// isNotDelim returns true if the given byte is not a delimiter character.
+func isNotDelim(c byte) bool {
+	return (c == '-' || c == '+' || c == '.' || c == '_' ||
+		('a' <= c && c <= 'z') ||
+		('A' <= c && c <= 'Z') ||
+		('0' <= c && c <= '9'))
+}
+
+// consume consumes n bytes of input and any subsequent whitespace.
+func (d *Decoder) consume(n int) {
+	d.in = d.in[n:]
+	for len(d.in) > 0 {
+		switch d.in[0] {
+		case ' ', '\n', '\r', '\t':
+			d.in = d.in[1:]
+		default:
+			return
+		}
+	}
+}
+
+// isValueNext returns true if next type should be a JSON value: Null,
+// Number, String or Bool.
+func (d *Decoder) isValueNext() bool {
+	if len(d.openStack) == 0 {
+		return d.lastToken.kind == 0
+	}
+
+	start := d.openStack[len(d.openStack)-1]
+	switch start {
+	case ObjectOpen:
+		return d.lastToken.kind&Name != 0
+	case ArrayOpen:
+		return d.lastToken.kind&(ArrayOpen|comma) != 0
+	}
+	panic(fmt.Sprintf(
+		"unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
+		d.lastToken.kind, start))
+}
+
+// consumeToken constructs a Token for given Kind with raw value derived from
+// current d.in and given size, and consumes that many bytes of the input.
+func (d *Decoder) consumeToken(kind Kind, size int) Token {
+	tok := Token{
+		kind: kind,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+	}
+	d.consume(size)
+	return tok
+}
+
+// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
+// current d.in and given size.
+func (d *Decoder) consumeBoolToken(b bool, size int) Token {
+	tok := Token{
+		kind: Bool,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+		boo:  b,
+	}
+	d.consume(size)
+	return tok
+}
+
+// consumeStringToken constructs a Token for a String kind with raw value derived
+// from current d.in and given size.
+func (d *Decoder) consumeStringToken(s string, size int) Token {
+	tok := Token{
+		kind: String,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+		str:  s,
+	}
+	d.consume(size)
+	return tok
+}
+
+// Clone returns a copy of the Decoder for use in reading ahead the next JSON
+// object, array or other value without affecting the current Decoder.
+func (d *Decoder) Clone() *Decoder {
+	ret := *d
+	ret.openStack = append([]Kind(nil), ret.openStack...)
+	return &ret
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
new file mode 100644
index 000000000..2999d7133
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
@@ -0,0 +1,254 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// parseNumber reads the given []byte for a valid JSON number. If it is valid,
+// it returns the number of bytes consumed. Parsing logic follows the
+// definition in https://tools.ietf.org/html/rfc7159#section-6, and is based on
+// the encoding/json.isValidNumber function.
+func parseNumber(input []byte) (int, bool) {
+	var n int
+
+	s := input
+	if len(s) == 0 {
+		return 0, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		n++
+		if len(s) == 0 {
+			return 0, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		s = s[1:]
+		n++
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		n++
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+
+	default:
+		return 0, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		n += 2
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		n++
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return 0, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+	}
+
+	// Check that next byte is a delimiter or it is at the end.
+	if n < len(input) && isNotDelim(input[n]) {
+		return 0, false
+	}
+
+	return n, true
+}
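+
+// For illustration (hypothetical inputs): parseNumber([]byte("-12.5e3"))
+// returns (7, true), consuming the whole literal, while
+// parseNumber([]byte("01")) returns (0, false) because a leading zero may not
+// be followed by further digits.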
+
+// numberParts is the result of parsing out a valid JSON number. It contains
+// the parts of a number. The parts are used for integer conversion.
+type numberParts struct {
+	neg  bool
+	intp []byte
+	frac []byte
+	exp  []byte
+}
+
+// parseNumberParts constructs numberParts from the given []byte. The logic
+// here is similar to parseNumber above, with the difference of having to
+// construct numberParts. The slice fields in numberParts are subslices of the
+// input.
+func parseNumberParts(input []byte) (numberParts, bool) {
+	var neg bool
+	var intp []byte
+	var frac []byte
+	var exp []byte
+
+	s := input
+	if len(s) == 0 {
+		return numberParts{}, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+		if len(s) == 0 {
+			return numberParts{}, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		// Skip first 0 and no need to store.
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		intp = s
+		n := 1
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		intp = intp[:n]
+
+	default:
+		return numberParts{}, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		frac = s[1:]
+		n := 1
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		frac = frac[:n]
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		exp = s
+		n := 0
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return numberParts{}, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		exp = exp[:n]
+	}
+
+	return numberParts{
+		neg:  neg,
+		intp: intp,
+		frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
+		exp:  exp,
+	}, true
+}
+
+// normalizeToIntString returns an integer string in normal form without the
+// E-notation for given numberParts. It will return false if it is not an
+// integer or if the exponent exceeds the max/min int value.
+func normalizeToIntString(n numberParts) (string, bool) {
+	intpSize := len(n.intp)
+	fracSize := len(n.frac)
+
+	if intpSize == 0 && fracSize == 0 {
+		return "0", true
+	}
+
+	var exp int
+	if len(n.exp) > 0 {
+		i, err := strconv.ParseInt(string(n.exp), 10, 32)
+		if err != nil {
+			return "", false
+		}
+		exp = int(i)
+	}
+
+	var num []byte
+	if exp >= 0 {
+		// For positive E, shift fraction digits into integer part and also pad
+		// with zeroes as needed.
+
+		// If there are more digits in fraction than the E value, then the
+		// number is not an integer.
+		if fracSize > exp {
+			return "", false
+		}
+
+		// Make sure resulting digits are within max value limit to avoid
+		// unnecessarily constructing a large byte slice that may simply fail
+		// later on.
+		const maxDigits = 20 // Max uint64 value has 20 decimal digits.
+		if intpSize+exp > maxDigits {
+			return "", false
+		}
+
+		// Set cap to make a copy of integer part when appended.
+		num = n.intp[:len(n.intp):len(n.intp)]
+		num = append(num, n.frac...)
+		for i := 0; i < exp-fracSize; i++ {
+			num = append(num, '0')
+		}
+	} else {
+		// For negative E, shift digits in integer part out.
+
+		// If there are fractions, then the number is not an integer.
+		if fracSize > 0 {
+			return "", false
+		}
+
+		// index is where the decimal point will be after adjusting for negative
+		// exponent.
+		index := intpSize + exp
+		if index < 0 {
+			return "", false
+		}
+
+		num = n.intp
+		// If any of the digits being shifted to the right of the decimal point
+		// is non-zero, then the number is not an integer.
+		for i := index; i < intpSize; i++ {
+			if num[i] != '0' {
+				return "", false
+			}
+		}
+		num = num[:index]
+	}
+
+	if n.neg {
+		return "-" + string(num), true
+	}
+	return string(num), true
+}
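+
+// For illustration (hypothetical inputs): the parts of "1.5e3" normalize to
+// "1500" (the fraction is shifted into the integer part), the parts of
+// "-0.5e1" normalize to "-5", and the parts of "1e-2" report false since
+// shifting the decimal point left of the first digit cannot yield an integer.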
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
new file mode 100644
index 000000000..f7fea7d8d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/strs"
+)
+
+func (d *Decoder) parseString(in []byte) (string, int, error) {
+	in0 := in
+	if len(in) == 0 {
+		return "", 0, ErrUnexpectedEOF
+	}
+	if in[0] != '"' {
+		return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
+	}
+	in = in[1:]
+	i := indexNeedEscapeInBytes(in)
+	in, out := in[i:], in[:i:i] // set cap to prevent mutations
+	for len(in) > 0 {
+		switch r, n := utf8.DecodeRune(in); {
+		case r == utf8.RuneError && n == 1:
+			return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
+		case r < ' ':
+			return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
+		case r == '"':
+			in = in[1:]
+			n := len(in0) - len(in)
+			return string(out), n, nil
+		case r == '\\':
+			if len(in) < 2 {
+				return "", 0, ErrUnexpectedEOF
+			}
+			switch r := in[1]; r {
+			case '"', '\\', '/':
+				in, out = in[2:], append(out, r)
+			case 'b':
+				in, out = in[2:], append(out, '\b')
+			case 'f':
+				in, out = in[2:], append(out, '\f')
+			case 'n':
+				in, out = in[2:], append(out, '\n')
+			case 'r':
+				in, out = in[2:], append(out, '\r')
+			case 't':
+				in, out = in[2:], append(out, '\t')
+			case 'u':
+				if len(in) < 6 {
+					return "", 0, ErrUnexpectedEOF
+				}
+				v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+				if err != nil {
+					return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
+				}
+				in = in[6:]
+
+				r := rune(v)
+				if utf16.IsSurrogate(r) {
+					if len(in) < 6 {
+						return "", 0, ErrUnexpectedEOF
+					}
+					v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+					r = utf16.DecodeRune(r, rune(v))
+					if in[0] != '\\' || in[1] != 'u' ||
+						r == unicode.ReplacementChar || err != nil {
+						return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
+					}
+					in = in[6:]
+				}
+				out = append(out, string(r)...)
+			default:
+				return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
+			}
+		default:
+			i := indexNeedEscapeInBytes(in[n:])
+			in, out = in[n+i:], append(out, in[:n+i]...)
+		}
+	}
+	return "", 0, ErrUnexpectedEOF
+}
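+
+// For illustration (hypothetical input): parseString on the 10 input bytes
+// "a\u00e9b" (including both quotes) decodes the \u00e9 escape and returns
+// ("aéb", 10, nil); a surrogate pair such as \ud83d\ude00 is combined via
+// utf16.DecodeRune in the branch above.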
+
+// indexNeedEscapeInBytes returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
new file mode 100644
index 000000000..50578d659
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
@@ -0,0 +1,192 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// Kind represents a token kind expressible in the JSON format.
+type Kind uint16
+
+const (
+	Invalid Kind = (1 << iota) / 2
+	EOF
+	Null
+	Bool
+	Number
+	String
+	Name
+	ObjectOpen
+	ObjectClose
+	ArrayOpen
+	ArrayClose
+
+	// comma is only for parsing in between tokens and
+	// does not need to be exported.
+	comma
+)
+
+func (k Kind) String() string {
+	switch k {
+	case EOF:
+		return "eof"
+	case Null:
+		return "null"
+	case Bool:
+		return "bool"
+	case Number:
+		return "number"
+	case String:
+		return "string"
+	case ObjectOpen:
+		return "{"
+	case ObjectClose:
+		return "}"
+	case Name:
+		return "name"
+	case ArrayOpen:
+		return "["
+	case ArrayClose:
+		return "]"
+	case comma:
+		return ","
+	}
+	return "<invalid>"
+}
+
+// Token provides a parsed token kind and value.
+//
+// Values are provided by the corresponding accessor methods. The accessor
+// methods Name, Bool, and ParsedString will panic if called on the wrong kind.
+// There are different accessor methods for the Number kind for converting to
+// the appropriate Go numeric type, and those methods have an ok return value.
+type Token struct {
+	// Token kind.
+	kind Kind
+	// pos provides the position of the token in the original input.
+	pos int
+	// raw bytes of the serialized token.
+	// This is a subslice into the original input.
+	raw []byte
+	// boo is parsed boolean value.
+	boo bool
+	// str is parsed string value.
+	str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+	return t.kind
+}
+
+// RawString returns the read value as a string.
+func (t Token) RawString() string {
+	return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+	return t.pos
+}
+
+// Name returns the object name if token is Name, else it panics.
+func (t Token) Name() string {
+	if t.kind == Name {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
+}
+
+// Bool returns the bool value if token kind is Bool, else it panics.
+func (t Token) Bool() bool {
+	if t.kind == Bool {
+		return t.boo
+	}
+	panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
+}
+
+// ParsedString returns the string value if the token kind is String, else it
+// panics.
+func (t Token) ParsedString() string {
+	if t.kind == String {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
+}
+
+// Float returns the floating-point number if token kind is Number.
+//
+// The floating-point precision is specified by the bitSize parameter: 32 for
+// float32 or 64 for float64. If bitSize=32, the result still has type float64,
+// but it will be convertible to float32 without changing its value. It will
+// return false if the number exceeds the floating point limits for given
+// bitSize.
+func (t Token) Float(bitSize int) (float64, bool) {
+	if t.kind != Number {
+		return 0, false
+	}
+	f, err := strconv.ParseFloat(t.RawString(), bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// Int returns the signed integer number if token is Number.
+//
+// The given bitSize specifies the integer type that the result must fit into.
+// It returns false if the number is not an integer value or if the result
+// exceeds the limits for given bitSize.
+func (t Token) Int(bitSize int) (int64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+// Uint returns the unsigned integer number if token is Number.
+//
+// The given bitSize specifies the unsigned integer type that the result must
+// fit into. It returns false if the number is not an unsigned integer value
+// or if the result exceeds the limits for given bitSize.
+func (t Token) Uint(bitSize int) (uint64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+func (t Token) getIntStr() (string, bool) {
+	if t.kind != Number {
+		return "", false
+	}
+	parts, ok := parseNumberParts(t.raw)
+	if !ok {
+		return "", false
+	}
+	return normalizeToIntString(parts)
+}
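+
+// For illustration (hypothetical tokens): a Number token with raw bytes "2e2"
+// satisfies tok.Int(32) == (200, true) because getIntStr normalizes the
+// E-notation, while raw bytes "1.5" yield (0, false) since the value is not
+// an integer.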
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+	return x.kind == y.kind &&
+		x.pos == y.pos &&
+		bytes.Equal(x.raw, y.raw) &&
+		x.boo == y.boo &&
+		x.str == y.str
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
new file mode 100644
index 000000000..fbdf34873
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"math"
+	"math/bits"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/detrand"
+	"google.golang.org/protobuf/internal/errors"
+)
+
+// kind represents an encoding type.
+type kind uint8
+
+const (
+	_ kind = (1 << iota) / 2
+	name
+	scalar
+	objectOpen
+	objectClose
+	arrayOpen
+	arrayClose
+)
+
+// Encoder provides methods to write out JSON constructs and values. The user is
+// responsible for producing valid sequences of JSON constructs and values.
+type Encoder struct {
+	indent   string
+	lastKind kind
+	indents  []byte
+	out      []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry for an Array or Object
+// to be preceded by the indent and trailed by a newline.
+func NewEncoder(indent string) (*Encoder, error) {
+	e := &Encoder{}
+	if len(indent) > 0 {
+		if strings.Trim(indent, " \t") != "" {
+			return nil, errors.New("indent may only be composed of space or tab characters")
+		}
+		e.indent = indent
+	}
+	return e, nil
+}
+
+// Bytes returns the content of the written bytes.
+func (e *Encoder) Bytes() []byte {
+	return e.out
+}
+
+// WriteNull writes out the null value.
+func (e *Encoder) WriteNull() {
+	e.prepareNext(scalar)
+	e.out = append(e.out, "null"...)
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) {
+	e.prepareNext(scalar)
+	if b {
+		e.out = append(e.out, "true"...)
+	} else {
+		e.out = append(e.out, "false"...)
+	}
+}
+
+// WriteString writes out the given string in JSON string value. Returns error
+// if input string contains invalid UTF-8.
+func (e *Encoder) WriteString(s string) error {
+	e.prepareNext(scalar)
+	var err error
+	if e.out, err = appendString(e.out, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Sentinel error used for indicating invalid UTF-8.
+var errInvalidUTF8 = errors.New("invalid UTF-8")
+
+func appendString(out []byte, in string) ([]byte, error) {
+	out = append(out, '"')
+	i := indexNeedEscapeInString(in)
+	in, out = in[i:], append(out, in[:i]...)
+	for len(in) > 0 {
+		switch r, n := utf8.DecodeRuneInString(in); {
+		case r == utf8.RuneError && n == 1:
+			return out, errInvalidUTF8
+		case r < ' ' || r == '"' || r == '\\':
+			out = append(out, '\\')
+			switch r {
+			case '"', '\\':
+				out = append(out, byte(r))
+			case '\b':
+				out = append(out, 'b')
+			case '\f':
+				out = append(out, 'f')
+			case '\n':
+				out = append(out, 'n')
+			case '\r':
+				out = append(out, 'r')
+			case '\t':
+				out = append(out, 't')
+			default:
+				out = append(out, 'u')
+				out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+				out = strconv.AppendUint(out, uint64(r), 16)
+			}
+			in = in[n:]
+		default:
+			i := indexNeedEscapeInString(in[n:])
+			in, out = in[n+i:], append(out, in[:n+i]...)
+		}
+	}
+	out = append(out, '"')
+	return out, nil
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInString(s string) int {
+	for i, r := range s {
+		if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
+			return i
+		}
+	}
+	return len(s)
+}
+
+// WriteFloat writes out the given float and bitSize in JSON number value.
+func (e *Encoder) WriteFloat(n float64, bitSize int) {
+	e.prepareNext(scalar)
+	e.out = appendFloat(e.out, n, bitSize)
+}
+
+// appendFloat formats given float in bitSize, and appends to the given []byte.
+func appendFloat(out []byte, n float64, bitSize int) []byte {
+	switch {
+	case math.IsNaN(n):
+		return append(out, `"NaN"`...)
+	case math.IsInf(n, +1):
+		return append(out, `"Infinity"`...)
+	case math.IsInf(n, -1):
+		return append(out, `"-Infinity"`...)
+	}
+
+	// JSON number formatting logic based on encoding/json.
+	// See floatEncoder.encode for reference.
+	fmt := byte('f')
+	if abs := math.Abs(n); abs != 0 {
+		if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
+			bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+			fmt = 'e'
+		}
+	}
+	out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
+	if fmt == 'e' {
+		n := len(out)
+		if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
+			out[n-2] = out[n-1]
+			out = out[:n-1]
+		}
+	}
+	return out
+}
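+
+// For illustration (hypothetical values): appendFloat(nil, 0.5, 64) appends
+// "0.5", appendFloat(nil, 1e-7, 64) appends "1e-7" (the exponent fixup above
+// rewrites strconv's "1e-07"), and appendFloat(nil, math.NaN(), 64) appends
+// the quoted string "NaN" per the protobuf JSON mapping.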
+
+// WriteInt writes out the given signed integer in JSON number value.
+func (e *Encoder) WriteInt(n int64) {
+	e.prepareNext(scalar)
+	e.out = append(e.out, strconv.FormatInt(n, 10)...)
+}
+
+// WriteUint writes out the given unsigned integer in JSON number value.
+func (e *Encoder) WriteUint(n uint64) {
+	e.prepareNext(scalar)
+	e.out = append(e.out, strconv.FormatUint(n, 10)...)
+}
+
+// StartObject writes out the '{' symbol.
+func (e *Encoder) StartObject() {
+	e.prepareNext(objectOpen)
+	e.out = append(e.out, '{')
+}
+
+// EndObject writes out the '}' symbol.
+func (e *Encoder) EndObject() {
+	e.prepareNext(objectClose)
+	e.out = append(e.out, '}')
+}
+
+// WriteName writes out the given string in JSON string value and the name
+// separator ':'. Returns error if the input string contains invalid UTF-8,
+// which is unlikely since protobuf field names should be valid.
+func (e *Encoder) WriteName(s string) error {
+	e.prepareNext(name)
+	var err error
+	// Append to output regardless of error.
+	e.out, err = appendString(e.out, s)
+	e.out = append(e.out, ':')
+	return err
+}
+
+// StartArray writes out the '[' symbol.
+func (e *Encoder) StartArray() {
+	e.prepareNext(arrayOpen)
+	e.out = append(e.out, '[')
+}
+
+// EndArray writes out the ']' symbol.
+func (e *Encoder) EndArray() {
+	e.prepareNext(arrayClose)
+	e.out = append(e.out, ']')
+}
+
+// prepareNext adds possible comma and indentation for the next value based
+// on last type and indent option. It also updates lastKind to next.
+func (e *Encoder) prepareNext(next kind) {
+	defer func() {
+		// Set lastKind to next.
+		e.lastKind = next
+	}()
+
+	if len(e.indent) == 0 {
+		// Need to add comma on the following condition.
+		if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
+			next&(name|scalar|objectOpen|arrayOpen) != 0 {
+			e.out = append(e.out, ',')
+			// For single-line output, add a random extra space after each
+			// comma to make output unstable.
+			if detrand.Bool() {
+				e.out = append(e.out, ' ')
+			}
+		}
+		return
+	}
+
+	switch {
+	case e.lastKind&(objectOpen|arrayOpen) != 0:
+		// If next type is NOT closing, add indent and newline.
+		if next&(objectClose|arrayClose) == 0 {
+			e.indents = append(e.indents, e.indent...)
+			e.out = append(e.out, '\n')
+			e.out = append(e.out, e.indents...)
+		}
+
+	case e.lastKind&(scalar|objectClose|arrayClose) != 0:
+		switch {
+		// If next type is either a value or name, add comma and newline.
+		case next&(name|scalar|objectOpen|arrayOpen) != 0:
+			e.out = append(e.out, ',', '\n')
+
+		// If next type is a closing object or array, adjust indentation.
+		case next&(objectClose|arrayClose) != 0:
+			e.indents = e.indents[:len(e.indents)-len(e.indent)]
+			e.out = append(e.out, '\n')
+		}
+		e.out = append(e.out, e.indents...)
+
+	case e.lastKind&name != 0:
+		e.out = append(e.out, ' ')
+		// For multi-line output, add a random extra space after key: to make
+		// output unstable.
+		if detrand.Bool() {
+			e.out = append(e.out, ' ')
+		}
+	}
+}
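+
+// For illustration (not part of the upstream code), a minimal usage sketch:
+//
+//	e, _ := NewEncoder("")
+//	e.StartObject()
+//	e.WriteName("a")
+//	e.WriteInt(1)
+//	e.EndObject()
+//	// string(e.Bytes()) == `{"a":1}`
+//
+// With more than one member in single-line mode, detrand may insert an extra
+// space after a comma, so exact byte output should not be relied upon.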
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
new file mode 100644
index 000000000..8c10797b9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -0,0 +1,498 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+// Package anypb contains generated types for google/protobuf/any.proto.
+//
+// The Any message is a dynamic representation of any other message value.
+// It is functionally a tuple of the full name of the remote message type and
+// the serialized bytes of the remote message value.
+//
+//
+// Constructing an Any
+//
+// An Any message containing another message value is constructed using New:
+//
+//	any, err := anypb.New(m)
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of any
+//
+//
+// Unmarshaling an Any
+//
+// With a populated Any message, the underlying message can be serialized into
+// a remote concrete message value in a few ways.
+//
+// If the exact concrete type is known, then a new (or pre-existing) instance
+// of that message can be passed to the UnmarshalTo method:
+//
+//	m := new(foopb.MyMessage)
+//	if err := any.UnmarshalTo(m); err != nil {
+//		... // handle error
+//	}
+//	... // make use of m
+//
+// If the exact concrete type is not known, then the UnmarshalNew method can be
+// used to unmarshal the contents into a new instance of the remote message type:
+//
+//	m, err := any.UnmarshalNew()
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of m
+//
+// UnmarshalNew uses the global type registry to resolve the message type and
+// construct a new instance of that message to unmarshal into. In order for a
+// message type to appear in the global registry, the Go type representing that
+// protobuf message type must be linked into the Go binary. For messages
+// generated by protoc-gen-go, this is achieved through an import of the
+// generated Go package representing a .proto file.
+//
+// A common pattern with UnmarshalNew is to use a type switch with the resulting
+// proto.Message value:
+//
+//	switch m := m.(type) {
+//	case *foopb.MyMessage:
+//		... // make use of m as a *foopb.MyMessage
+//	case *barpb.OtherMessage:
+//		... // make use of m as a *barpb.OtherMessage
+//	case *bazpb.SomeMessage:
+//		... // make use of m as a *bazpb.SomeMessage
+//	}
+//
+// This pattern ensures that the generated packages containing the message types
+// listed in the case clauses are linked into the Go binary and therefore also
+// registered in the global registry.
+//
+//
+// Type checking an Any
+//
+// In order to type check whether an Any message represents some other message,
+// then use the MessageIs method:
+//
+//	if any.MessageIs((*foopb.MyMessage)(nil)) {
+//		... // make use of any, knowing that it contains a foopb.MyMessage
+//	}
+//
+// The MessageIs method can also be used with an allocated instance of the target
+// message type if the intention is to unmarshal into it if the type matches:
+//
+//	m := new(foopb.MyMessage)
+//	if any.MessageIs(m) {
+//		if err := any.UnmarshalTo(m); err != nil {
+//			... // handle error
+//		}
+//		... // make use of m
+//	}
+//
+package anypb
+
+import (
+	proto "google.golang.org/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoregistry "google.golang.org/protobuf/reflect/protoregistry"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	strings "strings"
+	sync "sync"
+)
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := anypb.New(foo)
+//      if err != nil {
+//        ...
+//      }
+//      ...
+//      foo := &pb.Foo{}
+//      if err := any.UnmarshalTo(foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// New marshals src into a new Any instance.
+func New(src proto.Message) (*Any, error) {
+	dst := new(Any)
+	if err := dst.MarshalFrom(src); err != nil {
+		return nil, err
+	}
+	return dst, nil
+}
+
+// MarshalFrom marshals src into dst as the underlying message
+// using the provided marshal options.
+//
+// If no options are specified, call dst.MarshalFrom instead.
+func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error {
+	const urlPrefix = "type.googleapis.com/"
+	if src == nil {
+		return protoimpl.X.NewError("invalid nil source message")
+	}
+	b, err := opts.Marshal(src)
+	if err != nil {
+		return err
+	}
+	dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())
+	dst.Value = b
+	return nil
+}
+
+// UnmarshalTo unmarshals the underlying message from src into dst
+// using the provided unmarshal options.
+// It reports an error if dst is not of the right message type.
+//
+// If no options are specified, call src.UnmarshalTo instead.
+func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error {
+	if src == nil {
+		return protoimpl.X.NewError("invalid nil source message")
+	}
+	if !src.MessageIs(dst) {
+		got := dst.ProtoReflect().Descriptor().FullName()
+		want := src.MessageName()
+		return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want)
+	}
+	return opts.Unmarshal(src.GetValue(), dst)
+}
+
+// UnmarshalNew unmarshals the underlying message from src into dst,
+// which is a newly created message using a type resolved from the type URL.
+// The message type is resolved according to opt.Resolver,
+// which should implement protoregistry.MessageTypeResolver.
+// It reports an error if the underlying message type could not be resolved.
+//
+// If no options are specified, call src.UnmarshalNew instead.
+func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) {
+	if src.GetTypeUrl() == "" {
+		return nil, protoimpl.X.NewError("invalid empty type URL")
+	}
+	if opts.Resolver == nil {
+		opts.Resolver = protoregistry.GlobalTypes
+	}
+	r, ok := opts.Resolver.(protoregistry.MessageTypeResolver)
+	if !ok {
+		return nil, protoregistry.NotFound
+	}
+	mt, err := r.FindMessageByURL(src.GetTypeUrl())
+	if err != nil {
+		if err == protoregistry.NotFound {
+			return nil, err
+		}
+		return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err)
+	}
+	dst = mt.New().Interface()
+	return dst, opts.Unmarshal(src.GetValue(), dst)
+}
+
+// MessageIs reports whether the underlying message is of the same type as m.
+func (x *Any) MessageIs(m proto.Message) bool {
+	if m == nil {
+		return false
+	}
+	url := x.GetTypeUrl()
+	name := string(m.ProtoReflect().Descriptor().FullName())
+	if !strings.HasSuffix(url, name) {
+		return false
+	}
+	return len(url) == len(name) || url[len(url)-len(name)-1] == '/'
+}
+
+// MessageName reports the full name of the underlying message,
+// returning an empty string if invalid.
+func (x *Any) MessageName() protoreflect.FullName {
+	url := x.GetTypeUrl()
+	name := protoreflect.FullName(url)
+	if i := strings.LastIndexByte(url, '/'); i >= 0 {
+		name = name[i+len("/"):]
+	}
+	if !name.IsValid() {
+		return ""
+	}
+	return name
+}
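+
+// For illustration (hypothetical value): an Any with TypeUrl
+// "type.googleapis.com/google.protobuf.Duration" reports
+// MessageName() == "google.protobuf.Duration", and MessageIs returns true for
+// a *durationpb.Duration instance.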
+
+// MarshalFrom marshals m into x as the underlying message.
+func (x *Any) MarshalFrom(m proto.Message) error {
+	return MarshalFrom(x, m, proto.MarshalOptions{})
+}
+
+// UnmarshalTo unmarshals the contents of the underlying message of x into m.
+// It resets m before performing the unmarshal operation.
+// It reports an error if m is not of the right message type.
+func (x *Any) UnmarshalTo(m proto.Message) error {
+	return UnmarshalTo(x, m, proto.UnmarshalOptions{})
+}
+
+// UnmarshalNew unmarshals the contents of the underlying message of x into
+// a newly allocated message of the specified type.
+// It reports an error if the underlying message type could not be resolved.
+func (x *Any) UnmarshalNew() (proto.Message, error) {
+	return UnmarshalNew(x, proto.UnmarshalOptions{})
+}
+
+func (x *Any) Reset() {
+	*x = Any{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_any_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Any) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Any) ProtoMessage() {}
+
+func (x *Any) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_any_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Any.ProtoReflect.Descriptor instead.
+func (*Any) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_any_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Any) GetTypeUrl() string {
+	if x != nil {
+		return x.TypeUrl
+	}
+	return ""
+}
+
+func (x *Any) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_google_protobuf_any_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_any_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
+	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
+	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
+	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_any_proto_rawDescOnce sync.Once
+	file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+)
+
+func file_google_protobuf_any_proto_rawDescGZIP() []byte {
+	file_google_protobuf_any_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+	})
+	return file_google_protobuf_any_proto_rawDescData
+}
+
+var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_any_proto_goTypes = []interface{}{
+	(*Any)(nil), // 0: google.protobuf.Any
+}
+var file_google_protobuf_any_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_any_proto_init() }
+func file_google_protobuf_any_proto_init() {
+	if File_google_protobuf_any_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Any); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_any_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_any_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_any_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_any_proto = out.File
+	file_google_protobuf_any_proto_rawDesc = nil
+	file_google_protobuf_any_proto_goTypes = nil
+	file_google_protobuf_any_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
new file mode 100644
index 000000000..a583ca2f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -0,0 +1,379 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+// Package durationpb contains generated types for google/protobuf/duration.proto.
+//
+// The Duration message represents a signed span of time.
+//
+//
+// Conversion to a Go Duration
+//
+// The AsDuration method can be used to convert a Duration message to a
+// standard Go time.Duration value:
+//
+//	d := dur.AsDuration()
+//	... // make use of d as a time.Duration
+//
+// Converting to a time.Duration is a common operation so that the extensive
+// set of time-based operations provided by the time package can be leveraged.
+// See https://golang.org/pkg/time for more information.
+//
+// The AsDuration method performs the conversion on a best-effort basis.
+// Durations with denormal values (e.g., nanoseconds beyond -999,999,999 and
+// +999,999,999, inclusive; or seconds and nanoseconds with opposite signs)
+// are normalized during the conversion to a time.Duration. To manually check for
+// invalid Duration per the documented limitations in duration.proto,
+// additionally call the CheckValid method:
+//
+//	if err := dur.CheckValid(); err != nil {
+//		... // handle error
+//	}
+//
+// Note that the documented limitations in duration.proto do not protect a
+// Duration from overflowing the representable range of a time.Duration in Go.
+// The AsDuration method uses saturation arithmetic such that an overflow clamps
+// the resulting value to the closest representable value (e.g., math.MaxInt64
+// for positive overflow and math.MinInt64 for negative overflow).
+//
+//
+// Conversion from a Go Duration
+//
+// The durationpb.New function can be used to construct a Duration message
+// from a standard Go time.Duration value:
+//
+//	dur := durationpb.New(d)
+//	... // make use of d as a *durationpb.Duration
+//
+package durationpb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+	reflect "reflect"
+	sync "sync"
+	time "time"
+)
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
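+// As a sketch of this mapping, using the protojson package from this module
+// (error handling elided):
+//
+//	b, _ := protojson.Marshal(durationpb.New(3 * time.Second))
+//	// string(b) is the JSON string "3s" (quotes included)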
+//
+type Duration struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+// New constructs a new Duration from the provided time.Duration.
+func New(d time.Duration) *Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}
+}
+
+// AsDuration converts x to a time.Duration,
+// returning the closest duration value in the event of overflow.
+func (x *Duration) AsDuration() time.Duration {
+	secs := x.GetSeconds()
+	nanos := x.GetNanos()
+	d := time.Duration(secs) * time.Second
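+	// Overflow detection: if multiplying secs by time.Second overflowed,
+	// dividing back does not recover secs; adding nanos with the same sign
+	// as secs can also wrap d past the representable range, flipping its sign.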
+	overflow := d/time.Second != time.Duration(secs)
+	d += time.Duration(nanos) * time.Nanosecond
+	overflow = overflow || (secs < 0 && nanos < 0 && d > 0)
+	overflow = overflow || (secs > 0 && nanos > 0 && d < 0)
+	if overflow {
+		switch {
+		case secs < 0:
+			return time.Duration(math.MinInt64)
+		case secs > 0:
+			return time.Duration(math.MaxInt64)
+		}
+	}
+	return d
+}
+
+// IsValid reports whether the duration is valid.
+// It is equivalent to CheckValid == nil.
+func (x *Duration) IsValid() bool {
+	return x.check() == 0
+}
+
+// CheckValid returns an error if the duration is invalid.
+// In particular, it checks whether the value is within the range of
+// -10000 years to +10000 years inclusive.
+// An error is reported for a nil Duration.
+func (x *Duration) CheckValid() error {
+	switch x.check() {
+	case invalidNil:
+		return protoimpl.X.NewError("invalid nil Duration")
+	case invalidUnderflow:
+		return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x)
+	case invalidOverflow:
+		return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x)
+	case invalidNanosRange:
+		return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x)
+	case invalidNanosSign:
+		return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x)
+	default:
+		return nil
+	}
+}
+
+const (
+	_ = iota
+	invalidNil
+	invalidUnderflow
+	invalidOverflow
+	invalidNanosRange
+	invalidNanosSign
+)
+
+func (x *Duration) check() uint {
+	const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min
+	secs := x.GetSeconds()
+	nanos := x.GetNanos()
+	switch {
+	case x == nil:
+		return invalidNil
+	case secs < -absDuration:
+		return invalidUnderflow
+	case secs > +absDuration:
+		return invalidOverflow
+	case nanos <= -1e9 || nanos >= +1e9:
+		return invalidNanosRange
+	case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):
+		return invalidNanosSign
+	default:
+		return 0
+	}
+}
+
+func (x *Duration) Reset() {
+	*x = Duration{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_duration_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Duration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Duration) ProtoMessage() {}
+
+func (x *Duration) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_duration_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Duration.ProtoReflect.Descriptor instead.
+func (*Duration) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Duration) GetSeconds() int64 {
+	if x != nil {
+		return x.Seconds
+	}
+	return 0
+}
+
+func (x *Duration) GetNanos() int32 {
+	if x != nil {
+		return x.Nanos
+	}
+	return 0
+}
+
+var File_google_protobuf_duration_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_duration_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
+	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
+	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
+	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
+	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_duration_proto_rawDescOnce sync.Once
+	file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+)
+
+func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
+	file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+	})
+	return file_google_protobuf_duration_proto_rawDescData
+}
+
+var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_duration_proto_goTypes = []interface{}{
+	(*Duration)(nil), // 0: google.protobuf.Duration
+}
+var file_google_protobuf_duration_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_duration_proto_init() }
+func file_google_protobuf_duration_proto_init() {
+	if File_google_protobuf_duration_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Duration); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_duration_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_duration_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_duration_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_duration_proto = out.File
+	file_google_protobuf_duration_proto_rawDesc = nil
+	file_google_protobuf_duration_proto_goTypes = nil
+	file_google_protobuf_duration_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
new file mode 100644
index 000000000..1b2085d46
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -0,0 +1,591 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+//
+// Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of fm
+//
+// The "field.name" and "field.number" paths are valid paths according to the
+// google.protobuf.DescriptorProto message. Use of a path that does not correlate
+// to valid fields reachable from DescriptorProto would result in an error.
+//
+// Once a FieldMask message has been constructed,
+// the Append method can be used to append additional paths to the path set:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if err := fm.Append(messageType, "options"); err != nil {
+//		... // handle error
+//	}
+//
+//
+// Type checking a FieldMask
+//
+// In order to verify that a FieldMask represents a set of fields that are
+// reachable from some target message type, use the IsValid method:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if fm.IsValid(messageType) {
+//		... // make use of fm
+//	}
+//
+// IsValid needs to be passed the target message type as an input since the
+// FieldMask message itself does not store the message type that the set of paths
+// is for.
+package fieldmaskpb
+
+import (
+	proto "google.golang.org/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sort "sort"
+	strings "strings"
+	sync "sync"
+)
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+//     paths: "f.a"
+//     paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       y : 13
+//     }
+//     z: 8
+//
+// The result will not contain specific values for fields x, y, and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//       }
+//     }
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API.  In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
+//
+//     f {
+//       b {
+//         d: 1
+//         x: 2
+//       }
+//       c: [1]
+//     }
+//
+// And an update message:
+//
+//     f {
+//       b {
+//         d: 10
+//       }
+//       c: [2]
+//     }
+//
+// if the field mask is:
+//
+//  paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+//     f {
+//       b {
+//         d: 10
+//         x: 2
+//       }
+//       c: [1, 2]
+//     }
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//     message Profile {
+//       User user = 1;
+//       Photo photo = 2;
+//     }
+//     message User {
+//       string display_name = 1;
+//       string address = 2;
+//     }
+//
+// In proto, a field mask for `Profile` may look like this:
+//
+//     mask {
+//       paths: "user.display_name"
+//       paths: "photo"
+//     }
+//
+// In JSON, the same mask is represented as follows:
+//
+//     {
+//       mask: "user.displayName,photo"
+//     }
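+//
+// A sketch of producing that JSON form with the protojson package from this
+// module (error handling elided):
+//
+//	m := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "photo"}}
+//	b, _ := protojson.Marshal(m)
+//	// string(b) is the JSON string "user.displayName,photo"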
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//     message SampleMessage {
+//       oneof test_oneof {
+//         string name = 4;
+//         SubMessage sub_message = 9;
+//       }
+//     }
+//
+// The field mask can be:
+//
+//     mask {
+//       paths: "name"
+//     }
+//
+// Or:
+//
+//     mask {
+//       paths: "sub_message"
+//     }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is unmappable.
+type FieldMask struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of field mask paths.
+	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+// New constructs a field mask from a list of paths and verifies that
+// each one is valid according to the specified message type.
+func New(m proto.Message, paths ...string) (*FieldMask, error) {
+	x := new(FieldMask)
+	return x, x.Append(m, paths...)
+}
+
+// Union returns the union of all the paths in the input field masks.
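+// Redundant paths are elided from the result; a minimal sketch with
+// illustrative paths:
+//
+//	m := Union(
+//		&FieldMask{Paths: []string{"a.b"}},
+//		&FieldMask{Paths: []string{"a"}},
+//	)
+//	// m.GetPaths() == []string{"a"}, since "a" already covers "a.b"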
+func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var out []string
+	out = append(out, mx.GetPaths()...)
+	out = append(out, my.GetPaths()...)
+	for _, m := range ms {
+		out = append(out, m.GetPaths()...)
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// Intersect returns the intersection of all the paths in the input field masks.
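+// Intersecting a path with one of its prefixes yields the narrower path;
+// a minimal sketch with illustrative paths:
+//
+//	m := Intersect(
+//		&FieldMask{Paths: []string{"a.b"}},
+//		&FieldMask{Paths: []string{"a"}},
+//	)
+//	// m.GetPaths() == []string{"a.b"}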
+func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var ss1, ss2 []string // reused buffers for performance
+	intersect := func(out, in []string) []string {
+		ss1 = normalizePaths(append(ss1[:0], in...))
+		ss2 = normalizePaths(append(ss2[:0], out...))
+		out = out[:0]
+		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
+			switch s1, s2 := ss1[i1], ss2[i2]; {
+			case hasPathPrefix(s1, s2):
+				out = append(out, s1)
+				i1++
+			case hasPathPrefix(s2, s1):
+				out = append(out, s2)
+				i2++
+			case lessPath(s1, s2):
+				i1++
+			case lessPath(s2, s1):
+				i2++
+			}
+		}
+		return out
+	}
+
+	out := Union(mx, my, ms...).GetPaths()
+	out = intersect(out, mx.GetPaths())
+	out = intersect(out, my.GetPaths())
+	for _, m := range ms {
+		out = intersect(out, m.GetPaths())
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// IsValid reports whether all the paths are syntactically valid and
+// refer to known fields in the specified message type.
+// It reports false for a nil FieldMask.
+func (x *FieldMask) IsValid(m proto.Message) bool {
+	paths := x.GetPaths()
+	return x != nil && numValidPaths(m, paths) == len(paths)
+}
+
+// Append appends a list of paths to the mask and verifies that each one
+// is valid according to the specified message type.
+// An invalid path is not appended; it also prevents any subsequent paths from
+// being appended.
+func (x *FieldMask) Append(m proto.Message, paths ...string) error {
+	numValid := numValidPaths(m, paths)
+	x.Paths = append(x.Paths, paths[:numValid]...)
+	paths = paths[numValid:]
+	if len(paths) > 0 {
+		name := m.ProtoReflect().Descriptor().FullName()
+		return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
+	}
+	return nil
+}
+
+func numValidPaths(m proto.Message, paths []string) int {
+	md0 := m.ProtoReflect().Descriptor()
+	for i, path := range paths {
+		md := md0
+		if !rangeFields(path, func(field string) bool {
+			// Search the field within the message.
+			if md == nil {
+				return false // not within a message
+			}
+			fd := md.Fields().ByName(protoreflect.Name(field))
+			// The real field name of a group is the message name.
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
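+			// Unsigned byte subtraction wraps around: '.' maps to 0 and any
+			// byte below '.' maps to a large value, so '.' compares as the
+			// smallest symbol.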
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldMask) GetPaths() []string {
+	if x != nil {
+		return x.Paths
+	}
+	return nil
+}
+
+var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_field_mask_proto_rawDesc = []byte{
+	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
+	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
+	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
+	file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+)
+
+func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
+	file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+	})
+	return file_google_protobuf_field_mask_proto_rawDescData
+}
+
+var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
+	(*FieldMask)(nil), // 0: google.protobuf.FieldMask
+}
+var file_google_protobuf_field_mask_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_field_mask_proto_init() }
+func file_google_protobuf_field_mask_proto_init() {
+	if File_google_protobuf_field_mask_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FieldMask); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_field_mask_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_field_mask_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_field_mask_proto = out.File
+	file_google_protobuf_field_mask_proto_rawDesc = nil
+	file_google_protobuf_field_mask_proto_goTypes = nil
+	file_google_protobuf_field_mask_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
new file mode 100644
index 000000000..c9ae92132
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -0,0 +1,390 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+// Package timestamppb contains generated types for google/protobuf/timestamp.proto.
+//
+// The Timestamp message represents a timestamp,
+// an instant in time since the Unix epoch (January 1st, 1970).
+//
+//
+// Conversion to a Go Time
+//
+// The AsTime method can be used to convert a Timestamp message to a
+// standard Go time.Time value in UTC:
+//
+//	t := ts.AsTime()
+//	... // make use of t as a time.Time
+//
+// Converting to a time.Time is a common operation so that the extensive
+// set of time-based operations provided by the time package can be leveraged.
+// See https://golang.org/pkg/time for more information.
+//
+// The AsTime method performs the conversion on a best-effort basis. Timestamps
+// with denormal values (e.g., nanoseconds beyond 0 and 999,999,999, inclusive)
+// are normalized during the conversion to a time.Time. To manually check for
+// invalid Timestamps per the documented limitations in timestamp.proto,
+// additionally call the CheckValid method:
+//
+//	if err := ts.CheckValid(); err != nil {
+//		... // handle error
+//	}
+//
+//
+// Conversion from a Go Time
+//
+// The timestamppb.New function can be used to construct a Timestamp message
+// from a standard Go time.Time value:
+//
+//	ts := timestamppb.New(t)
+//	... // make use of ts as a *timestamppb.Timestamp
+//
+// In order to construct a Timestamp representing the current time, use Now:
+//
+//	ts := timestamppb.Now()
+//	... // make use of ts as a *timestamppb.Timestamp
+//
+package timestamppb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+	time "time"
+)
+
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from Java `Instant.now()`.
+//
+//     Instant now = Instant.now();
+//
+//     Timestamp timestamp =
+//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+//             .setNanos(now.getNano()).build();
+//
+//
+// Example 6: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
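+// A sketch of this mapping, using the protojson package from this module
+// (error handling elided):
+//
+//	ts := timestamppb.New(time.Date(2017, 1, 15, 1, 30, 15, 10_000_000, time.UTC))
+//	b, _ := protojson.Marshal(ts)
+//	// string(b) is the JSON string "2017-01-15T01:30:15.010Z"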
+//
+type Timestamp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+// Now constructs a new Timestamp from the current time.
+func Now() *Timestamp {
+	return New(time.Now())
+}
+
+// New constructs a new Timestamp from the provided time.Time.
+func New(t time.Time) *Timestamp {
+	return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}
+}
+
+// AsTime converts x to a time.Time.
+func (x *Timestamp) AsTime() time.Time {
+	return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()
+}
+
+// IsValid reports whether the timestamp is valid.
+// It is equivalent to CheckValid == nil.
+func (x *Timestamp) IsValid() bool {
+	return x.check() == 0
+}
+
+// CheckValid returns an error if the timestamp is invalid.
+// In particular, it checks whether the value represents a date that is
+// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
+// An error is reported for a nil Timestamp.
+func (x *Timestamp) CheckValid() error {
+	switch x.check() {
+	case invalidNil:
+		return protoimpl.X.NewError("invalid nil Timestamp")
+	case invalidUnderflow:
+		return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x)
+	case invalidOverflow:
+		return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x)
+	case invalidNanos:
+		return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x)
+	default:
+		return nil
+	}
+}
+
+const (
+	_ = iota
+	invalidNil
+	invalidUnderflow
+	invalidOverflow
+	invalidNanos
+)
+
+func (x *Timestamp) check() uint {
+	const minTimestamp = -62135596800  // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive
+	const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive
+	secs := x.GetSeconds()
+	nanos := x.GetNanos()
+	switch {
+	case x == nil:
+		return invalidNil
+	case secs < minTimestamp:
+		return invalidUnderflow
+	case secs > maxTimestamp:
+		return invalidOverflow
+	case nanos < 0 || nanos >= 1e9:
+		return invalidNanos
+	default:
+		return 0
+	}
+}
+
+func (x *Timestamp) Reset() {
+	*x = Timestamp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Timestamp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Timestamp) ProtoMessage() {}
+
+func (x *Timestamp) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Timestamp) GetSeconds() int64 {
+	if x != nil {
+		return x.Seconds
+	}
+	return 0
+}
+
+func (x *Timestamp) GetNanos() int32 {
+	if x != nil {
+		return x.Nanos
+	}
+	return 0
+}
+
+var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_timestamp_proto_rawDesc = []byte{
+	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
+	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
+	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
+	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
+	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
+	file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+)
+
+func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
+	file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+	})
+	return file_google_protobuf_timestamp_proto_rawDescData
+}
+
+var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+	(*Timestamp)(nil), // 0: google.protobuf.Timestamp
+}
+var file_google_protobuf_timestamp_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_timestamp_proto_init() }
+func file_google_protobuf_timestamp_proto_init() {
+	if File_google_protobuf_timestamp_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Timestamp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_timestamp_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_timestamp_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_timestamp_proto = out.File
+	file_google_protobuf_timestamp_proto_rawDesc = nil
+	file_google_protobuf_timestamp_proto_goTypes = nil
+	file_google_protobuf_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
new file mode 100644
index 000000000..895a8049e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -0,0 +1,760 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+//
+// These wrappers have no meaningful use within repeated fields as they lack
+// the ability to detect presence on individual elements.
+// These wrappers have no meaningful use within a map or a oneof since
+// individual entries of a map or fields of a oneof can already detect presence.
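+//
+// As a sketch, for a hypothetical proto3 field declared as
+// `google.protobuf.StringValue nickname = 1;`, the generated Go field is a
+// pointer, so absence and the empty string remain distinguishable:
+//
+//	msg.Nickname = nil                   // field is unset
+//	msg.Nickname = wrapperspb.String("") // field is explicitly set to ""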
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrapperspb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The double value.
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Double stores v in a new DoubleValue and returns a pointer to it.
+func Double(v float64) *DoubleValue {
+	return &DoubleValue{Value: v}
+}
+
+func (x *DoubleValue) Reset() {
+	*x = DoubleValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DoubleValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleValue) ProtoMessage() {}
+
+func (x *DoubleValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead.
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DoubleValue) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The float value.
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Float stores v in a new FloatValue and returns a pointer to it.
+func Float(v float32) *FloatValue {
+	return &FloatValue{Value: v}
+}
+
+func (x *FloatValue) Reset() {
+	*x = FloatValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FloatValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FloatValue) ProtoMessage() {}
+
+func (x *FloatValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead.
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FloatValue) GetValue() float32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int64 value.
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int64 stores v in a new Int64Value and returns a pointer to it.
+func Int64(v int64) *Int64Value {
+	return &Int64Value{Value: v}
+}
+
+func (x *Int64Value) Reset() {
+	*x = Int64Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Int64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Value) ProtoMessage() {}
+
+func (x *Int64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead.
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Int64Value) GetValue() int64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint64 value.
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt64 stores v in a new UInt64Value and returns a pointer to it.
+func UInt64(v uint64) *UInt64Value {
+	return &UInt64Value{Value: v}
+}
+
+func (x *UInt64Value) Reset() {
+	*x = UInt64Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UInt64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt64Value) ProtoMessage() {}
+
+func (x *UInt64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead.
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UInt64Value) GetValue() uint64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int32 value.
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int32 stores v in a new Int32Value and returns a pointer to it.
+func Int32(v int32) *Int32Value {
+	return &Int32Value{Value: v}
+}
+
+func (x *Int32Value) Reset() {
+	*x = Int32Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Int32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Value) ProtoMessage() {}
+
+func (x *Int32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead.
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Int32Value) GetValue() int32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint32 value.
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt32 stores v in a new UInt32Value and returns a pointer to it.
+func UInt32(v uint32) *UInt32Value {
+	return &UInt32Value{Value: v}
+}
+
+func (x *UInt32Value) Reset() {
+	*x = UInt32Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UInt32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt32Value) ProtoMessage() {}
+
+func (x *UInt32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead.
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *UInt32Value) GetValue() uint32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bool value.
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bool stores v in a new BoolValue and returns a pointer to it.
+func Bool(v bool) *BoolValue {
+	return &BoolValue{Value: v}
+}
+
+func (x *BoolValue) Reset() {
+	*x = BoolValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BoolValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BoolValue) ProtoMessage() {}
+
+func (x *BoolValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead.
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *BoolValue) GetValue() bool {
+	if x != nil {
+		return x.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The string value.
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// String stores v in a new StringValue and returns a pointer to it.
+func String(v string) *StringValue {
+	return &StringValue{Value: v}
+}
+
+func (x *StringValue) Reset() {
+	*x = StringValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *StringValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringValue) ProtoMessage() {}
+
+func (x *StringValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringValue.ProtoReflect.Descriptor instead.
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *StringValue) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bytes value.
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bytes stores v in a new BytesValue and returns a pointer to it.
+func Bytes(v []byte) *BytesValue {
+	return &BytesValue{Value: v}
+}
+
+func (x *BytesValue) Reset() {
+	*x = BytesValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BytesValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BytesValue) ProtoMessage() {}
+
+func (x *BytesValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BytesValue) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
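+
+// Note: the Bool, Int32, String, Bytes, etc. helpers above exist because
+// proto3 scalar fields cannot distinguish "unset" from the zero value; a nil
+// *BoolValue means the field is absent, while Bool(false) is an explicit false.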
+
+var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
+	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
+	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
+	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
+	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
+	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+)
+
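+// rawDescGZIP gzip-compresses the raw file descriptor exactly once (memoized
+// via sync.Once); only the deprecated Descriptor methods above consume the
+// compressed form.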
+func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
+	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+	})
+	return file_google_protobuf_wrappers_proto_rawDescData
+}
+
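+// One MessageInfo slot per wrapper message; the hard-coded indexes used by
+// the Reset and ProtoReflect methods above point into this slice.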
+var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
+	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
+	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
+	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
+	(*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
+	(*Int32Value)(nil),  // 4: google.protobuf.Int32Value
+	(*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
+	(*BoolValue)(nil),   // 6: google.protobuf.BoolValue
+	(*StringValue)(nil), // 7: google.protobuf.StringValue
+	(*BytesValue)(nil),  // 8: google.protobuf.BytesValue
+}
+var file_google_protobuf_wrappers_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_wrappers_proto_init() }
+func file_google_protobuf_wrappers_proto_init() {
+	if File_google_protobuf_wrappers_proto != nil {
+		return
+	}
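+	// When the unsafe fast path is compiled out (e.g. purego builds), register
+	// an exporter per message so the reflection runtime can reach the
+	// unexported state, sizeCache, and unknownFields fields without unsafe.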
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DoubleValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FloatValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Int64Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UInt64Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Int32Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UInt32Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BoolValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*StringValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BytesValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
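+	// The throwaway type below exists only so reflect can report this
+	// package's import path; TypeBuilder then links the raw descriptor,
+	// Go types, and dependency indexes and registers the built file.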
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   9,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_wrappers_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_wrappers_proto = out.File
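+	// The builder inputs are no longer needed; clearing the package-level
+	// references lets the garbage collector reclaim whatever the built file
+	// does not retain.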
+	file_google_protobuf_wrappers_proto_rawDesc = nil
+	file_google_protobuf_wrappers_proto_goTypes = nil
+	file_google_protobuf_wrappers_proto_depIdxs = nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1c581cd1c..fd0a3f92e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -107,6 +107,9 @@ github.com/bytedance/sonic/loader
 github.com/bytedance/sonic/option
 github.com/bytedance/sonic/unquote
 github.com/bytedance/sonic/utf8
+# github.com/cenkalti/backoff/v4 v4.2.0
+## explicit; go 1.18
+github.com/cenkalti/backoff/v4
 # github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311
 ## explicit; go 1.15
 github.com/chenzhuoyu/base64x
@@ -202,6 +205,13 @@ github.com/go-fed/httpsig
 github.com/go-jose/go-jose/v3
 github.com/go-jose/go-jose/v3/cipher
 github.com/go-jose/go-jose/v3/json
+# github.com/go-logr/logr v1.2.3
+## explicit; go 1.16
+github.com/go-logr/logr
+github.com/go-logr/logr/funcr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
 # github.com/go-playground/form/v4 v4.2.0
 ## explicit; go 1.13
 github.com/go-playground/form/v4
@@ -244,7 +254,12 @@ github.com/golang/geo/s1
 github.com/golang/geo/s2
 # github.com/golang/protobuf v1.5.2
 ## explicit; go 1.9
+github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
+github.com/golang/protobuf/ptypes
+github.com/golang/protobuf/ptypes/any
+github.com/golang/protobuf/ptypes/duration
+github.com/golang/protobuf/ptypes/timestamp
 # github.com/google/uuid v1.3.0
 ## explicit
 github.com/google/uuid
@@ -266,6 +281,11 @@ github.com/gorilla/sessions
 # github.com/gorilla/websocket v1.5.0
 ## explicit; go 1.12
 github.com/gorilla/websocket
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0
+## explicit; go 1.14
+github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
+github.com/grpc-ecosystem/grpc-gateway/v2/runtime
+github.com/grpc-ecosystem/grpc-gateway/v2/utilities
 # github.com/h2non/filetype v1.1.3
 ## explicit; go 1.13
 github.com/h2non/filetype
@@ -706,6 +726,12 @@ github.com/uptrace/bun/dialect/pgdialect
 # github.com/uptrace/bun/dialect/sqlitedialect v1.1.12
 ## explicit; go 1.18
 github.com/uptrace/bun/dialect/sqlitedialect
+# github.com/uptrace/bun/extra/bunotel v1.1.12
+## explicit; go 1.18
+github.com/uptrace/bun/extra/bunotel
+# github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.21
+## explicit; go 1.18
+github.com/uptrace/opentelemetry-go-extra/otelsql
 # github.com/vmihailenco/msgpack/v5 v5.3.5
 ## explicit; go 1.11
 github.com/vmihailenco/msgpack/v5
@@ -729,6 +755,69 @@ github.com/yuin/goldmark/renderer
 github.com/yuin/goldmark/renderer/html
 github.com/yuin/goldmark/text
 github.com/yuin/goldmark/util
+# go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.40.0
+## explicit; go 1.18
+go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin
+# go.opentelemetry.io/otel v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel
+go.opentelemetry.io/otel/attribute
+go.opentelemetry.io/otel/baggage
+go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/exporters/otlp/internal
+go.opentelemetry.io/otel/exporters/otlp/internal/envconfig
+go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/attribute
+go.opentelemetry.io/otel/internal/baggage
+go.opentelemetry.io/otel/internal/global
+go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/internal
+go.opentelemetry.io/otel/semconv/internal/v2
+go.opentelemetry.io/otel/semconv/v1.10.0
+go.opentelemetry.io/otel/semconv/v1.12.0
+go.opentelemetry.io/otel/semconv/v1.17.0
+go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
+# go.opentelemetry.io/otel/exporters/jaeger v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/exporters/jaeger
+go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent
+go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger
+go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore
+go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift
+# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/exporters/otlp/internal/retry
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/exporters/otlp/otlptrace
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+# go.opentelemetry.io/otel/metric v0.36.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/metric
+go.opentelemetry.io/otel/metric/global
+go.opentelemetry.io/otel/metric/instrument
+go.opentelemetry.io/otel/metric/internal/global
+go.opentelemetry.io/otel/metric/unit
+# go.opentelemetry.io/otel/sdk v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/sdk/instrumentation
+go.opentelemetry.io/otel/sdk/internal
+go.opentelemetry.io/otel/sdk/internal/env
+go.opentelemetry.io/otel/sdk/resource
+go.opentelemetry.io/otel/sdk/trace
+# go.opentelemetry.io/otel/trace v1.14.0
+## explicit; go 1.18
+go.opentelemetry.io/otel/trace
+# go.opentelemetry.io/proto/otlp v0.19.0
+## explicit; go 1.14
+go.opentelemetry.io/proto/otlp/collector/trace/v1
+go.opentelemetry.io/proto/otlp/common/v1
+go.opentelemetry.io/proto/otlp/resource/v1
+go.opentelemetry.io/proto/otlp/trace/v1
 # go.uber.org/automaxprocs v1.5.2
 ## explicit; go 1.18
 go.uber.org/automaxprocs/internal/cgroups
@@ -787,9 +876,11 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/iana
 golang.org/x/net/internal/socket
+golang.org/x/net/internal/timeseries
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/publicsuffix
+golang.org/x/net/trace
 # golang.org/x/oauth2 v0.7.0
 ## explicit; go 1.17
 golang.org/x/oauth2
@@ -801,6 +892,7 @@ golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
 golang.org/x/sys/windows
+golang.org/x/sys/windows/registry
 # golang.org/x/text v0.9.0
 ## explicit; go 1.17
 golang.org/x/text/cases
@@ -841,14 +933,73 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
+# google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f
+## explicit; go 1.19
+google.golang.org/genproto/googleapis/api/httpbody
+google.golang.org/genproto/googleapis/rpc/errdetails
+google.golang.org/genproto/googleapis/rpc/status
+google.golang.org/genproto/protobuf/field_mask
+# google.golang.org/grpc v1.53.0
+## explicit; go 1.17
+google.golang.org/grpc
+google.golang.org/grpc/attributes
+google.golang.org/grpc/backoff
+google.golang.org/grpc/balancer
+google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/roundrobin
+google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
+google.golang.org/grpc/codes
+google.golang.org/grpc/connectivity
+google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/insecure
+google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/gzip
+google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
+google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
+google.golang.org/grpc/internal/balancerload
+google.golang.org/grpc/internal/binarylog
+google.golang.org/grpc/internal/buffer
+google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
+google.golang.org/grpc/internal/envconfig
+google.golang.org/grpc/internal/grpclog
+google.golang.org/grpc/internal/grpcrand
+google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/resolver
+google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/resolver/unix
+google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/status
+google.golang.org/grpc/internal/syscall
+google.golang.org/grpc/internal/transport
+google.golang.org/grpc/internal/transport/networktype
+google.golang.org/grpc/keepalive
+google.golang.org/grpc/metadata
+google.golang.org/grpc/peer
+google.golang.org/grpc/resolver
+google.golang.org/grpc/serviceconfig
+google.golang.org/grpc/stats
+google.golang.org/grpc/status
+google.golang.org/grpc/tap
 # google.golang.org/protobuf v1.28.1
 ## explicit; go 1.11
+google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
 google.golang.org/protobuf/internal/descopts
 google.golang.org/protobuf/internal/detrand
 google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/json
 google.golang.org/protobuf/internal/encoding/messageset
 google.golang.org/protobuf/internal/encoding/tag
 google.golang.org/protobuf/internal/encoding/text
@@ -870,6 +1021,11 @@ google.golang.org/protobuf/reflect/protoregistry
 google.golang.org/protobuf/runtime/protoiface
 google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/known/anypb
+google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/fieldmaskpb
+google.golang.org/protobuf/types/known/timestamppb
+google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/ini.v1 v1.67.0
 ## explicit
 gopkg.in/ini.v1