From 3be9813d56ff53aede1f2c77c54fd5bd3e53dcc7 Mon Sep 17 00:00:00 2001 From: Tyler Yahn Date: Sat, 12 Jun 2021 18:39:22 +0000 Subject: [PATCH] Deprecate the exporters in the "trace" and "metric" sub-directories (#1993) * Rename exporters/metric/prometheus * Rename exporters/trace/jaeger * Rename exporters/trace/zipkin * Fix dependabot config and make test * Update README docs * Deprecate exporters instead of remove them * Update dependabot with old modules * Run crosslink * Add lint override for known deprecated pkg * Update PR number --- .github/dependabot.yml | 50 + CHANGELOG.md | 4 + Makefile | 2 +- README.md | 20 +- bridge/opencensus/go.mod | 12 +- bridge/opentracing/go.mod | 12 +- example/jaeger/go.mod | 14 +- example/jaeger/main.go | 2 +- example/namedtracer/go.mod | 12 +- example/opencensus/go.mod | 12 +- example/otel-collector/go.mod | 12 +- example/passthrough/go.mod | 12 +- example/prometheus/go.mod | 14 +- example/prometheus/main.go | 2 +- example/zipkin/go.mod | 14 +- example/zipkin/main.go | 2 +- exporters/README.md | 17 - exporters/jaeger/README.md | 13 + exporters/jaeger/agent.go | 203 ++ exporters/jaeger/agent_test.go | 189 ++ exporters/jaeger/assertsocketbuffersize.go | 48 + .../jaeger/assertsocketbuffersize_windows.go | 34 + exporters/jaeger/doc.go | 20 + exporters/jaeger/env.go | 44 + exporters/jaeger/env_test.go | 242 ++ exporters/jaeger/go.mod | 75 + exporters/jaeger/go.sum | 16 + .../gen-go/agent/GoUnusedProtection__.go | 6 + .../internal/gen-go/agent/agent-consts.go | 27 + .../gen-go/agent/agent-remote/agent-remote.go | 210 ++ .../jaeger/internal/gen-go/agent/agent.go | 412 +++ .../gen-go/jaeger/GoUnusedProtection__.go | 6 + .../collector-remote/collector-remote.go | 180 + .../internal/gen-go/jaeger/jaeger-consts.go | 22 + .../jaeger/internal/gen-go/jaeger/jaeger.go | 3022 +++++++++++++++++ .../gen-go/zipkincore/GoUnusedProtection__.go | 6 + .../zipkin_collector-remote.go | 180 + .../gen-go/zipkincore/zipkincore-consts.go | 39 + .../internal/gen-go/zipkincore/zipkincore.go | 2067 +++++++++++ .../internal/third_party/thrift/LICENSE | 306 ++ .../jaeger/internal/third_party/thrift/NOTICE | 5 + .../lib/go/thrift/application_exception.go | 180 + .../thrift/lib/go/thrift/binary_protocol.go | 555 +++ .../lib/go/thrift/buffered_transport.go | 99 + .../thrift/lib/go/thrift/client.go | 109 + .../thrift/lib/go/thrift/compact_protocol.go | 865 +++++ .../thrift/lib/go/thrift/configuration.go | 378 +++ .../thrift/lib/go/thrift/context.go | 24 + .../thrift/lib/go/thrift/debug_protocol.go | 447 +++ .../thrift/lib/go/thrift/deserializer.go | 121 + .../thrift/lib/go/thrift/exception.go | 116 + .../thrift/lib/go/thrift/framed_transport.go | 223 ++ .../thrift/lib/go/thrift/header_context.go | 110 + .../thrift/lib/go/thrift/header_protocol.go | 351 ++ .../thrift/lib/go/thrift/header_transport.go | 810 +++++ .../thrift/lib/go/thrift/http_client.go | 258 ++ .../thrift/lib/go/thrift/http_transport.go | 74 + .../lib/go/thrift/iostream_transport.go | 222 ++ .../thrift/lib/go/thrift/json_protocol.go | 591 ++++ .../thrift/lib/go/thrift/logger.go | 69 + .../thrift/lib/go/thrift/memory_buffer.go | 80 + .../thrift/lib/go/thrift/messagetype.go | 31 + .../thrift/lib/go/thrift/middleware.go | 109 + .../lib/go/thrift/multiplexed_protocol.go | 237 ++ .../thrift/lib/go/thrift/numeric.go | 164 + .../thrift/lib/go/thrift/pointerize.go | 52 + .../thrift/lib/go/thrift/processor_factory.go | 80 + .../thrift/lib/go/thrift/protocol.go | 177 + .../lib/go/thrift/protocol_exception.go | 104 + 
.../thrift/lib/go/thrift/protocol_factory.go | 25 + .../thrift/lib/go/thrift/response_helper.go | 94 + .../thrift/lib/go/thrift/rich_transport.go | 71 + .../thrift/lib/go/thrift/serializer.go | 136 + .../thrift/lib/go/thrift/server.go | 35 + .../thrift/lib/go/thrift/server_socket.go | 137 + .../thrift/lib/go/thrift/server_transport.go | 34 + .../lib/go/thrift/simple_json_protocol.go | 1373 ++++++++ .../thrift/lib/go/thrift/simple_server.go | 332 ++ .../thrift/lib/go/thrift/socket.go | 238 ++ .../thrift/lib/go/thrift/socket_conn.go | 102 + .../thrift/lib/go/thrift/socket_unix_conn.go | 83 + .../lib/go/thrift/socket_windows_conn.go | 34 + .../thrift/lib/go/thrift/ssl_server_socket.go | 112 + .../thrift/lib/go/thrift/ssl_socket.go | 258 ++ .../thrift/lib/go/thrift/transport.go | 70 + .../lib/go/thrift/transport_exception.go | 131 + .../thrift/lib/go/thrift/transport_factory.go | 39 + .../third_party/thrift/lib/go/thrift/type.go | 69 + .../thrift/lib/go/thrift/zlib_transport.go | 137 + exporters/jaeger/jaeger.go | 346 ++ exporters/jaeger/jaeger_benchmark_test.go | 115 + exporters/jaeger/jaeger_test.go | 723 ++++ exporters/jaeger/reconnecting_udp_client.go | 203 ++ .../jaeger/reconnecting_udp_client_test.go | 498 +++ exporters/jaeger/uploader.go | 313 ++ exporters/metric/prometheus/go.mod | 6 + exporters/metric/prometheus/prometheus.go | 7 + .../metric/prometheus/prometheus_test.go | 2 +- exporters/otlp/otlpmetric/go.mod | 6 + .../otlp/otlpmetric/otlpmetricgrpc/go.mod | 6 + exporters/otlp/otlptrace/go.mod | 12 +- exporters/otlp/otlptrace/otlptracegrpc/go.mod | 12 +- exporters/otlp/otlptrace/otlptracehttp/go.mod | 12 +- exporters/prometheus/README.md | 9 + exporters/prometheus/go.mod | 77 + exporters/prometheus/go.sum | 150 + exporters/prometheus/prometheus.go | 317 ++ exporters/prometheus/prometheus_test.go | 203 ++ exporters/prometheus/sanitize.go | 50 + exporters/prometheus/sanitize_test.go | 61 + exporters/stdout/go.mod | 12 +- exporters/trace/jaeger/doc.go | 7 +- exporters/trace/jaeger/go.mod | 6 + exporters/trace/zipkin/doc.go | 7 +- exporters/trace/zipkin/go.mod | 6 + exporters/zipkin/doc.go | 20 + exporters/zipkin/go.mod | 76 + exporters/zipkin/go.sum | 108 + exporters/zipkin/model.go | 288 ++ exporters/zipkin/model_test.go | 982 ++++++ exporters/zipkin/zipkin.go | 185 + exporters/zipkin/zipkin_test.go | 338 ++ go.mod | 12 +- internal/metric/go.mod | 12 +- internal/tools/go.mod | 12 +- metric/go.mod | 12 +- oteltest/go.mod | 12 +- sdk/export/metric/go.mod | 12 +- sdk/go.mod | 12 +- sdk/metric/go.mod | 12 +- trace/go.mod | 12 +- website_docs/exporting_data.md | 8 +- 132 files changed, 22900 insertions(+), 110 deletions(-) delete mode 100644 exporters/README.md create mode 100644 exporters/jaeger/README.md create mode 100644 exporters/jaeger/agent.go create mode 100644 exporters/jaeger/agent_test.go create mode 100644 exporters/jaeger/assertsocketbuffersize.go create mode 100644 exporters/jaeger/assertsocketbuffersize_windows.go create mode 100644 exporters/jaeger/doc.go create mode 100644 exporters/jaeger/env.go create mode 100644 exporters/jaeger/env_test.go create mode 100644 exporters/jaeger/go.mod create mode 100644 exporters/jaeger/go.sum create mode 100644 exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go create mode 100644 exporters/jaeger/internal/gen-go/agent/agent-consts.go create mode 100755 exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go create mode 100644 exporters/jaeger/internal/gen-go/agent/agent.go create mode 100644 
exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go create mode 100755 exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go create mode 100644 exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go create mode 100644 exporters/jaeger/internal/gen-go/jaeger/jaeger.go create mode 100644 exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go create mode 100755 exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go create mode 100644 exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go create mode 100644 exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go create mode 100644 exporters/jaeger/internal/third_party/thrift/LICENSE create mode 100644 exporters/jaeger/internal/third_party/thrift/NOTICE create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go create mode 100644 
exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go create mode 100644 exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go create mode 100644 exporters/jaeger/jaeger.go create mode 100644 exporters/jaeger/jaeger_benchmark_test.go create mode 100644 exporters/jaeger/jaeger_test.go create mode 100644 exporters/jaeger/reconnecting_udp_client.go create mode 100644 exporters/jaeger/reconnecting_udp_client_test.go create mode 100644 exporters/jaeger/uploader.go create mode 100644 exporters/prometheus/README.md create mode 100644 exporters/prometheus/go.mod create mode 100644 exporters/prometheus/go.sum create mode 100644 exporters/prometheus/prometheus.go create mode 100644 exporters/prometheus/prometheus_test.go create mode 100644 exporters/prometheus/sanitize.go create mode 100644 exporters/prometheus/sanitize_test.go create mode 100644 exporters/zipkin/doc.go create mode 100644 exporters/zipkin/go.mod create mode 100644 exporters/zipkin/go.sum create mode 100644 exporters/zipkin/model.go create mode 100644 exporters/zipkin/model_test.go create mode 100644 exporters/zipkin/zipkin.go create mode 100644 exporters/zipkin/zipkin_test.go diff --git a/.github/dependabot.yml b/.github/dependabot.yml index fcd692ab260..f669211cdf1 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -136,6 +136,16 @@ updates: schedule: day: sunday interval: weekly + - + package-ecosystem: gomod + directory: /exporters/prometheus + labels: + - dependencies + - go + - "Skip Changelog" + schedule: + day: sunday + interval: weekly - package-ecosystem: gomod directory: /exporters/stdout @@ -156,6 +166,16 @@ updates: schedule: day: sunday interval: weekly + - + package-ecosystem: gomod + directory: /exporters/jaeger + labels: + - dependencies + - go + - "Skip Changelog" + 
schedule: + day: sunday + interval: weekly - package-ecosystem: gomod directory: /exporters/trace/zipkin @@ -166,6 +186,16 @@ updates: schedule: day: sunday interval: weekly + - + package-ecosystem: gomod + directory: /exporters/zipkin + labels: + - dependencies + - go + - "Skip Changelog" + schedule: + day: sunday + interval: weekly - package-ecosystem: gomod directory: /sdk @@ -196,6 +226,16 @@ updates: schedule: day: sunday interval: weekly + - + package-ecosystem: gomod + directory: /metric + labels: + - dependencies + - go + - "Skip Changelog" + schedule: + day: sunday + interval: weekly - package-ecosystem: gomod directory: /sdk/export/metric @@ -286,3 +326,13 @@ updates: schedule: day: sunday interval: weekly + - + package-ecosystem: gomod + directory: /trace + labels: + - dependencies + - go + - "Skip Changelog" + schedule: + day: sunday + interval: weekly diff --git a/CHANGELOG.md b/CHANGELOG.md index e3fe3b3da6a..2bef76628f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,10 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Deprecated +- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993) + ### Removed - Remove `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) diff --git a/Makefile b/Makefile index 16ec7be0218..8d0452e4e49 100644 --- a/Makefile +++ b/Makefile @@ -162,7 +162,7 @@ license-check: dependabot-check: @result=$$( \ for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \ - do grep -q "$$f" .github/dependabot.yml \ + do grep -q "directory: \+$$f" .github/dependabot.yml \ || echo "$$f"; \ done; \ ); \ diff --git a/README.md b/README.md index c83ee8be7aa..a4852f9f19f 100644 --- a/README.md +++ b/README.md @@ -89,15 +89,17 @@ practical uses of this process. Now that your application is instrumented to collect telemetry, it needs an export pipeline to send that telemetry to an observability platform. -You can find officially supported exporters [here](./exporters/) and in the -companion [contrib -repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters/metric). -Additionally, there are many vendor specific or 3rd party exporters for -OpenTelemetry. These exporters are broken down by -[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby) -and -[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby) -support. +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). + +| Exporter | Metrics | Traces | +| :-----------------------------------: | :-----: | :----: | +| [Jaeger](./exporters/jaeger/) | | ✓ | +| [OTLP](./exporters/otlp/) | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | ✓ | + +Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters). 
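
The exporter table above and the new `Deprecated` CHANGELOG entries describe an import-path relocation: `exporters/metric/prometheus`, `exporters/trace/jaeger`, and `exporters/trace/zipkin` are superseded by `exporters/prometheus`, `exporters/jaeger`, and `exporters/zipkin`. Below is a minimal migration sketch for wiring the relocated Jaeger exporter into a tracer provider; the constructor and option names (`jaeger.New`, `jaeger.WithCollectorEndpoint`, `jaeger.WithEndpoint`) are the ones exercised by the tests added later in this patch, and the collector URL is a placeholder.

```go
package main

import (
	"context"
	"log"

	// Deprecated path: "go.opentelemetry.io/otel/exporters/trace/jaeger".
	"go.opentelemetry.io/otel/exporters/jaeger"
	tracesdk "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Placeholder collector endpoint; replace with your Jaeger collector URL.
	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(
		jaeger.WithEndpoint("http://localhost:14268/api/traces"),
	))
	if err != nil {
		log.Fatal(err)
	}

	// Batch spans through the SDK tracer provider and flush on exit.
	tp := tracesdk.NewTracerProvider(tracesdk.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```

Consumers of the deprecated Prometheus and Zipkin packages follow the same pattern: only the import path changes, from `exporters/metric/prometheus` to `exporters/prometheus` and from `exporters/trace/zipkin` to `exporters/zipkin`, as the updated example programs in this patch show.
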
## Contributing diff --git a/bridge/opencensus/go.mod b/bridge/opencensus/go.mod index e4a005cb691..531f3600fc5 100644 --- a/bridge/opencensus/go.mod +++ b/bridge/opencensus/go.mod @@ -32,13 +32,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -64,6 +64,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/bridge/opentracing/go.mod b/bridge/opentracing/go.mod index 6b8e3946de0..6cd04f079bc 100644 --- a/bridge/opentracing/go.mod +++ b/bridge/opentracing/go.mod @@ -28,13 +28,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -60,6 +60,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. 
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/jaeger/go.mod b/example/jaeger/go.mod index d6e5113197a..84a2caae708 100644 --- a/example/jaeger/go.mod +++ b/example/jaeger/go.mod @@ -4,13 +4,13 @@ go 1.15 replace ( go.opentelemetry.io/otel => ../.. - go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger go.opentelemetry.io/otel/sdk => ../../sdk ) require ( go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/exporters/trace/jaeger v0.20.0 + go.opentelemetry.io/otel/exporters/jaeger v0.20.0 go.opentelemetry.io/otel/sdk v0.20.0 ) @@ -32,11 +32,11 @@ replace go.opentelemetry.io/otel/example/prometheus => ../prometheus replace go.opentelemetry.io/otel/example/zipkin => ../zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -60,6 +60,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. 
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/jaeger/main.go b/example/jaeger/main.go index 97e4bacf70b..146e276d0be 100644 --- a/example/jaeger/main.go +++ b/example/jaeger/main.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/trace/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/example/namedtracer/go.mod b/example/namedtracer/go.mod index 051b4591d76..2fa0a080d6d 100644 --- a/example/namedtracer/go.mod +++ b/example/namedtracer/go.mod @@ -33,11 +33,11 @@ replace go.opentelemetry.io/otel/example/prometheus => ../prometheus replace go.opentelemetry.io/otel/example/zipkin => ../zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -61,6 +61,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. 
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/opencensus/go.mod b/example/opencensus/go.mod index e806c0a1ec4..2c2311844f9 100644 --- a/example/opencensus/go.mod +++ b/example/opencensus/go.mod @@ -34,11 +34,11 @@ replace go.opentelemetry.io/otel/example/prometheus => ../prometheus replace go.opentelemetry.io/otel/example/zipkin => ../zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -62,6 +62,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/otel-collector/go.mod b/example/otel-collector/go.mod index 0f7e811bd7b..9c290ee23bd 100644 --- a/example/otel-collector/go.mod +++ b/example/otel-collector/go.mod @@ -33,13 +33,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../prometheus replace go.opentelemetry.io/otel/example/zipkin => ../zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -63,6 +63,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. 
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/passthrough/go.mod b/example/passthrough/go.mod index d9936fb9289..2610e394aff 100644 --- a/example/passthrough/go.mod +++ b/example/passthrough/go.mod @@ -36,11 +36,11 @@ replace go.opentelemetry.io/otel/example/prometheus => ../prometheus replace go.opentelemetry.io/otel/example/zipkin => ../zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -62,6 +62,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/prometheus/go.mod b/example/prometheus/go.mod index d84b1b11a7e..d463bbb8070 100644 --- a/example/prometheus/go.mod +++ b/example/prometheus/go.mod @@ -4,13 +4,13 @@ go 1.15 replace ( go.opentelemetry.io/otel => ../.. 
- go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus go.opentelemetry.io/otel/sdk => ../../sdk ) require ( go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/exporters/metric/prometheus v0.20.0 + go.opentelemetry.io/otel/exporters/prometheus v0.20.0 go.opentelemetry.io/otel/metric v0.20.0 go.opentelemetry.io/otel/sdk/export/metric v0.20.0 go.opentelemetry.io/otel/sdk/metric v0.20.0 @@ -36,9 +36,9 @@ replace go.opentelemetry.io/otel/example/zipkin => ../zipkin replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -62,6 +62,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/prometheus/main.go b/example/prometheus/main.go index 6241b92ec8d..81cce5cda0f 100644 --- a/example/prometheus/main.go +++ b/example/prometheus/main.go @@ -23,7 +23,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/metric/prometheus" + "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/global" export "go.opentelemetry.io/otel/sdk/export/metric" diff --git a/example/zipkin/go.mod b/example/zipkin/go.mod index a07315ce2ea..9908a81d2f4 100644 --- a/example/zipkin/go.mod +++ b/example/zipkin/go.mod @@ -4,13 +4,13 @@ go 1.15 replace ( go.opentelemetry.io/otel => ../.. 
- go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin go.opentelemetry.io/otel/sdk => ../../sdk ) require ( go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/exporters/trace/zipkin v0.20.0 + go.opentelemetry.io/otel/exporters/zipkin v0.20.0 go.opentelemetry.io/otel/sdk v0.20.0 go.opentelemetry.io/otel/trace v0.20.0 ) @@ -33,11 +33,11 @@ replace go.opentelemetry.io/otel/example/prometheus => ../prometheus replace go.opentelemetry.io/otel/example/zipkin => ./ -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -61,6 +61,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/example/zipkin/main.go b/example/zipkin/main.go index badec69d0e7..08689854d93 100644 --- a/example/zipkin/main.go +++ b/example/zipkin/main.go @@ -24,7 +24,7 @@ import ( "time" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/trace/zipkin" + "go.opentelemetry.io/otel/exporters/zipkin" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" diff --git a/exporters/README.md b/exporters/README.md deleted file mode 100644 index 136b30dc2db..00000000000 --- a/exporters/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Exporters - -Included in this directory are exporters that export both metric and trace telemetry. - -- [stdout](./stdout): Writes telemetry to a specified local output as structured JSON. -- [otlp](./otlp): Sends telemetry to an OpenTelemetry collector as OTLP. - -Additionally, there are [metric](./metric) and [trace](./trace) only exporters. - -## Metric Telemetry Only - -- [prometheus](./metric/prometheus): Exposes metric telemetry as Prometheus metrics. - -## Trace Telemetry Only - -- [jaeger](./trace/jaeger): Sends properly transformed trace telemetry to a Jaeger endpoint. -- [zipkin](./trace/zipkin): Sends properly transformed trace telemetry to a Zipkin endpoint. diff --git a/exporters/jaeger/README.md b/exporters/jaeger/README.md new file mode 100644 index 00000000000..56ad4747275 --- /dev/null +++ b/exporters/jaeger/README.md @@ -0,0 +1,13 @@ +# OpenTelemetry-Go Jaeger Exporter + +OpenTelemetry Jaeger exporter + +## Installation + +``` +go get -u go.opentelemetry.io/otel/exporters/jaeger +``` + +## Maintenance + +This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path. 
When re-generating Thrift code in future, please adapt import paths as necessary. diff --git a/exporters/jaeger/agent.go b/exporters/jaeger/agent.go new file mode 100644 index 00000000000..80cc1d2f628 --- /dev/null +++ b/exporters/jaeger/agent.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "context" + "fmt" + "io" + "log" + "net" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" + + genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" +) + +// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent +const udpPacketMaxLength = 65000 + +// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. +type agentClientUDP struct { + genAgent.Agent + io.Closer + + connUDP udpConn + client *genAgent.AgentClient + maxPacketSize int // max size of datagram in bytes + thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span + thriftProtocol thrift.TProtocol +} + +type udpConn interface { + Write([]byte) (int, error) + SetWriteBuffer(int) error + Close() error +} + +type agentClientUDPParams struct { + Host string + Port string + MaxPacketSize int + Logger *log.Logger + AttemptReconnecting bool + AttemptReconnectInterval time.Duration +} + +// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
+func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) { + hostPort := net.JoinHostPort(params.Host, params.Port) + // validate hostport + if _, _, err := net.SplitHostPort(hostPort); err != nil { + return nil, err + } + + if params.MaxPacketSize <= 0 { + params.MaxPacketSize = udpPacketMaxLength + } + + if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 { + params.AttemptReconnectInterval = time.Second * 30 + } + + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) + protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{}) + thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) + client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory) + + var connUDP udpConn + var err error + + if params.AttemptReconnecting { + // host is hostname, setup resolver loop in case host record changes during operation + connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) + if err != nil { + return nil, err + } + } else { + destAddr, err := net.ResolveUDPAddr("udp", hostPort) + if err != nil { + return nil, err + } + + connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + } + + if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { + return nil, err + } + + return &agentClientUDP{ + connUDP: connUDP, + client: client, + maxPacketSize: params.MaxPacketSize, + thriftBuffer: thriftBuffer, + thriftProtocol: thriftProtocol, + }, nil +} + +// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent. +func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error { + var errs []error + processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process) + if err != nil { + // drop the batch if serialization of process fails. + return err + } + totalSize := processSize + var spans []*gen.Span + for _, span := range batch.Spans { + spanSize, err := a.calcSizeOfSerializedThrift(ctx, span) + if err != nil { + errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span)) + continue + } + if spanSize+processSize >= a.maxPacketSize { + // drop the span that exceeds the limit. + errs = append(errs, fmt.Errorf("span too large to send: %v", span)) + continue + } + if totalSize+spanSize >= a.maxPacketSize { + if err := a.flush(ctx, &gen.Batch{ + Process: batch.Process, + Spans: spans, + }); err != nil { + errs = append(errs, err) + } + spans = spans[:0] + totalSize = processSize + } + totalSize += spanSize + spans = append(spans, span) + } + + if len(spans) > 0 { + if err := a.flush(ctx, &gen.Batch{ + Process: batch.Process, + Spans: spans, + }); err != nil { + errs = append(errs, err) + } + } + + if len(errs) == 1 { + return errs[0] + } else if len(errs) > 1 { + joined := a.makeJoinedErrorString(errs) + return fmt.Errorf("multiple errors during transform: %s", joined) + } + return nil +} + +// makeJoinedErrorString join all the errors to one error message. +func (a *agentClientUDP) makeJoinedErrorString(errs []error) string { + var errMsgs []string + for _, err := range errs { + errMsgs = append(errMsgs, err.Error()) + } + return strings.Join(errMsgs, ", ") +} + +// flush will send the batch of spans to the agent. 
+func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error { + a.thriftBuffer.Reset() + if err := a.client.EmitBatch(ctx, batch); err != nil { + return err + } + if a.thriftBuffer.Len() > a.maxPacketSize { + return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", + a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) + } + _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) + return err +} + +// calcSizeOfSerializedThrift calculate the serialized thrift packet size. +func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) { + a.thriftBuffer.Reset() + err := thriftStruct.Write(ctx, a.thriftProtocol) + return a.thriftBuffer.Len(), err +} + +// Close implements Close() of io.Closer and closes the underlying UDP connection. +func (a *agentClientUDP) Close() error { + return a.connUDP.Close() +} diff --git a/exporters/jaeger/agent_test.go b/exporters/jaeger/agent_test.go new file mode 100644 index 00000000000..62de111dae5 --- /dev/null +++ b/exporters/jaeger/agent_test.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package jaeger + +import ( + "context" + "log" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/sdk/trace/tracetest" +) + +func TestNewAgentClientUDPWithParamsBadHostport(t *testing.T) { + agentClient, err := newAgentClientUDP(agentClientUDPParams{ + Host: "blahblah", + Port: "", + }) + assert.Error(t, err) + assert.Nil(t, agentClient) +} + +func TestNewAgentClientUDPWithParams(t *testing.T) { + mockServer, err := newUDPListener() + require.NoError(t, err) + defer mockServer.Close() + host, port, err := net.SplitHostPort(mockServer.LocalAddr().String()) + assert.NoError(t, err) + + agentClient, err := newAgentClientUDP(agentClientUDPParams{ + Host: host, + Port: port, + MaxPacketSize: 25000, + AttemptReconnecting: true, + }) + assert.NoError(t, err) + assert.NotNil(t, agentClient) + assert.Equal(t, 25000, agentClient.maxPacketSize) + + if assert.IsType(t, &reconnectingUDPConn{}, agentClient.connUDP) { + assert.Equal(t, (*log.Logger)(nil), agentClient.connUDP.(*reconnectingUDPConn).logger) + } + + assert.NoError(t, agentClient.Close()) +} + +func TestNewAgentClientUDPWithParamsDefaults(t *testing.T) { + mockServer, err := newUDPListener() + require.NoError(t, err) + defer mockServer.Close() + host, port, err := net.SplitHostPort(mockServer.LocalAddr().String()) + assert.NoError(t, err) + + agentClient, err := newAgentClientUDP(agentClientUDPParams{ + Host: host, + Port: port, + AttemptReconnecting: true, + }) + assert.NoError(t, err) + assert.NotNil(t, agentClient) + assert.Equal(t, udpPacketMaxLength, agentClient.maxPacketSize) + + if assert.IsType(t, &reconnectingUDPConn{}, agentClient.connUDP) { + assert.Equal(t, (*log.Logger)(nil), agentClient.connUDP.(*reconnectingUDPConn).logger) + } + + 
assert.NoError(t, agentClient.Close()) +} + +func TestNewAgentClientUDPWithParamsReconnectingDisabled(t *testing.T) { + mockServer, err := newUDPListener() + require.NoError(t, err) + defer mockServer.Close() + host, port, err := net.SplitHostPort(mockServer.LocalAddr().String()) + assert.NoError(t, err) + + agentClient, err := newAgentClientUDP(agentClientUDPParams{ + Host: host, + Port: port, + Logger: nil, + AttemptReconnecting: false, + }) + assert.NoError(t, err) + assert.NotNil(t, agentClient) + assert.Equal(t, udpPacketMaxLength, agentClient.maxPacketSize) + + assert.IsType(t, &net.UDPConn{}, agentClient.connUDP) + + assert.NoError(t, agentClient.Close()) +} + +type errorHandler struct{ t *testing.T } + +func (eh errorHandler) Handle(err error) { assert.NoError(eh.t, err) } + +func TestJaegerAgentUDPLimitBatching(t *testing.T) { + otel.SetErrorHandler(errorHandler{t}) + + mockServer, err := newUDPListener() + require.NoError(t, err) + defer mockServer.Close() + host, port, err := net.SplitHostPort(mockServer.LocalAddr().String()) + assert.NoError(t, err) + + // 1500 spans, size 79559, does not fit within one UDP packet with the default size of 65000. + n := 1500 + s := make(tracetest.SpanStubs, n).Snapshots() + + exp, err := New( + WithAgentEndpoint(WithAgentHost(host), WithAgentPort(port)), + ) + require.NoError(t, err) + + ctx := context.Background() + assert.NoError(t, exp.ExportSpans(ctx, s)) + assert.NoError(t, exp.Shutdown(ctx)) +} + +// generateALargeSpan generates a span with a long name. +func generateALargeSpan() tracetest.SpanStub { + return tracetest.SpanStub{ + Name: "a-longer-name-that-makes-it-exceeds-limit", + } +} + +func TestSpanExceedsMaxPacketLimit(t *testing.T) { + otel.SetErrorHandler(errorHandler{t}) + + mockServer, err := newUDPListener() + require.NoError(t, err) + defer mockServer.Close() + host, port, err := net.SplitHostPort(mockServer.LocalAddr().String()) + assert.NoError(t, err) + + // 106 is the serialized size of a span with default values. 
+ maxSize := 106 + + largeSpans := tracetest.SpanStubs{generateALargeSpan(), {}}.Snapshots() + normalSpans := tracetest.SpanStubs{{}, {}}.Snapshots() + + exp, err := New( + WithAgentEndpoint(WithAgentHost(host), WithAgentPort(port), WithMaxPacketSize(maxSize+1)), + ) + require.NoError(t, err) + + ctx := context.Background() + assert.Error(t, exp.ExportSpans(ctx, largeSpans)) + assert.NoError(t, exp.ExportSpans(ctx, normalSpans)) + assert.NoError(t, exp.Shutdown(ctx)) +} + +func TestEmitBatchWithMultipleErrors(t *testing.T) { + otel.SetErrorHandler(errorHandler{t}) + + mockServer, err := newUDPListener() + require.NoError(t, err) + defer mockServer.Close() + host, port, err := net.SplitHostPort(mockServer.LocalAddr().String()) + assert.NoError(t, err) + + span := generateALargeSpan() + largeSpans := tracetest.SpanStubs{span, span}.Snapshots() + // make max packet size smaller than span + maxSize := len(span.Name) + exp, err := New( + WithAgentEndpoint(WithAgentHost(host), WithAgentPort(port), WithMaxPacketSize(maxSize)), + ) + require.NoError(t, err) + + ctx := context.Background() + err = exp.ExportSpans(ctx, largeSpans) + assert.Error(t, err) + require.Contains(t, err.Error(), "multiple errors") +} diff --git a/exporters/jaeger/assertsocketbuffersize.go b/exporters/jaeger/assertsocketbuffersize.go new file mode 100644 index 00000000000..40f9ce830c7 --- /dev/null +++ b/exporters/jaeger/assertsocketbuffersize.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package jaeger + +import ( + "net" + "runtime" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" +) + +func assertSockBufferSize(t *testing.T, expectedBytes int, conn *net.UDPConn) bool { + fd, err := conn.File() + if !assert.NoError(t, err) { + return false + } + + bufferBytes, err := syscall.GetsockoptInt(int(fd.Fd()), syscall.SOL_SOCKET, syscall.SO_SNDBUF) + if !assert.NoError(t, err) { + return false + } + + // The linux kernel doubles SO_SNDBUF value (to allow space for + // bookkeeping overhead) when it is set using setsockopt(2), and this + // doubled value is returned by getsockopt(2) + // https://linux.die.net/man/7/socket + if runtime.GOOS == "linux" { + return assert.GreaterOrEqual(t, expectedBytes*2, bufferBytes) + } + + return assert.Equal(t, expectedBytes, bufferBytes) +} diff --git a/exporters/jaeger/assertsocketbuffersize_windows.go b/exporters/jaeger/assertsocketbuffersize_windows.go new file mode 100644 index 00000000000..9fba4ba4d34 --- /dev/null +++ b/exporters/jaeger/assertsocketbuffersize_windows.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package jaeger + +import ( + "net" + "testing" +) + +func assertSockBufferSize(t *testing.T, expectedBytes int, conn *net.UDPConn) bool { + // The Windows implementation of the net.UDPConn does not implement the + // functionality to return a file handle, instead a "not supported" error + // is returned: + // + // https://github.com/golang/go/blob/6cc8aa7ece96aca282db19f08aa5c98ed13695d9/src/net/fd_windows.go#L175-L178 + // + // This means we are not able to pass the connection to a syscall and + // determine the buffer size. + return true +} diff --git a/exporters/jaeger/doc.go b/exporters/jaeger/doc.go new file mode 100644 index 00000000000..c91f7d6a8b1 --- /dev/null +++ b/exporters/jaeger/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger. +// +// This package is currently in a pre-GA phase. Backwards incompatible changes +// may be introduced in subsequent minor version releases as we work to track +// the evolving OpenTelemetry specification and user feedback. +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" diff --git a/exporters/jaeger/env.go b/exporters/jaeger/env.go new file mode 100644 index 00000000000..75eada8253c --- /dev/null +++ b/exporters/jaeger/env.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "os" +) + +// Environment variable names +const ( + // Hostname for the Jaeger agent, part of address where exporter sends spans + // i.e. "localhost" + envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST" + // Port for the Jaeger agent, part of address where exporter sends spans + // i.e. 6832 + envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT" + // The HTTP endpoint for sending spans directly to a collector, + // i.e. http://jaeger-collector:14268/api/traces. 
+ envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT" + // Username to send as part of "Basic" authentication to the collector endpoint. + envUser = "OTEL_EXPORTER_JAEGER_USER" + // Password to send as part of "Basic" authentication to the collector endpoint. + envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD" +) + +// envOr returns an env variable's value if it is exists or the default if not +func envOr(key, defaultValue string) string { + if v, ok := os.LookupEnv(key); ok && v != "" { + return v + } + return defaultValue +} diff --git a/exporters/jaeger/env_test.go b/exporters/jaeger/env_test.go new file mode 100644 index 00000000000..0a9ee1900ba --- /dev/null +++ b/exporters/jaeger/env_test.go @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ottest "go.opentelemetry.io/otel/internal/internaltest" +) + +func TestNewRawExporterWithDefault(t *testing.T) { + const ( + collectorEndpoint = "http://localhost:14268/api/traces" + username = "" + password = "" + ) + + // Create Jaeger Exporter with default values + exp, err := New( + WithCollectorEndpoint(), + ) + + assert.NoError(t, err) + + require.IsType(t, &collectorUploader{}, exp.uploader) + uploader := exp.uploader.(*collectorUploader) + assert.Equal(t, collectorEndpoint, uploader.endpoint) + assert.Equal(t, username, uploader.username) + assert.Equal(t, password, uploader.password) +} + +func TestNewRawExporterWithEnv(t *testing.T) { + const ( + collectorEndpoint = "http://localhost" + username = "user" + password = "password" + ) + + envStore, err := ottest.SetEnvVariables(map[string]string{ + envEndpoint: collectorEndpoint, + envUser: username, + envPassword: password, + }) + require.NoError(t, err) + defer func() { + require.NoError(t, envStore.Restore()) + }() + + // Create Jaeger Exporter with environment variables + exp, err := New( + WithCollectorEndpoint(), + ) + + assert.NoError(t, err) + + require.IsType(t, &collectorUploader{}, exp.uploader) + uploader := exp.uploader.(*collectorUploader) + assert.Equal(t, collectorEndpoint, uploader.endpoint) + assert.Equal(t, username, uploader.username) + assert.Equal(t, password, uploader.password) +} + +func TestNewRawExporterWithPassedOption(t *testing.T) { + const ( + collectorEndpoint = "http://localhost" + username = "user" + password = "password" + optionEndpoint = "should not be overwritten" + ) + + envStore, err := ottest.SetEnvVariables(map[string]string{ + envEndpoint: collectorEndpoint, + envUser: username, + envPassword: password, + }) + require.NoError(t, err) + defer func() { + require.NoError(t, envStore.Restore()) + }() + + // Create Jaeger Exporter with passed endpoint option, should be used over envEndpoint + exp, err := New( + WithCollectorEndpoint(WithEndpoint(optionEndpoint)), + ) + + assert.NoError(t, err) + + require.IsType(t, &collectorUploader{}, exp.uploader) + uploader := 
exp.uploader.(*collectorUploader) + assert.Equal(t, optionEndpoint, uploader.endpoint) + assert.Equal(t, username, uploader.username) + assert.Equal(t, password, uploader.password) +} + +func TestEnvOrWithAgentHostPortFromEnv(t *testing.T) { + testCases := []struct { + name string + envAgentHost string + envAgentPort string + defaultHost string + defaultPort string + expectedHost string + expectedPort string + }{ + { + name: "overrides default host/port values via environment variables", + envAgentHost: "localhost", + envAgentPort: "6832", + defaultHost: "hostNameToBeReplaced", + defaultPort: "8203", + expectedHost: "localhost", + expectedPort: "6832", + }, + { + name: "envAgentHost is empty, will not overwrite default host value", + envAgentHost: "", + envAgentPort: "6832", + defaultHost: "hostNameNotToBeReplaced", + defaultPort: "8203", + expectedHost: "hostNameNotToBeReplaced", + expectedPort: "6832", + }, + { + name: "envAgentPort is empty, will not overwrite default port value", + envAgentHost: "localhost", + envAgentPort: "", + defaultHost: "hostNameToBeReplaced", + defaultPort: "8203", + expectedHost: "localhost", + expectedPort: "8203", + }, + { + name: "envAgentHost and envAgentPort are empty, will not overwrite default host/port values", + envAgentHost: "", + envAgentPort: "", + defaultHost: "hostNameNotToBeReplaced", + defaultPort: "8203", + expectedHost: "hostNameNotToBeReplaced", + expectedPort: "8203", + }, + } + + envStore := ottest.NewEnvStore() + envStore.Record(envAgentHost) + envStore.Record(envAgentPort) + defer func() { + require.NoError(t, envStore.Restore()) + }() + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + require.NoError(t, os.Setenv(envAgentHost, tc.envAgentHost)) + require.NoError(t, os.Setenv(envAgentPort, tc.envAgentPort)) + host := envOr(envAgentHost, tc.defaultHost) + port := envOr(envAgentPort, tc.defaultPort) + assert.Equal(t, tc.expectedHost, host) + assert.Equal(t, tc.expectedPort, port) + }) + } +} + +func TestEnvOrWithCollectorEndpointOptionsFromEnv(t *testing.T) { + testCases := []struct { + name string + envEndpoint string + envUsername string + envPassword string + defaultCollectorEndpointOptions collectorEndpointConfig + expectedCollectorEndpointOptions collectorEndpointConfig + }{ + { + name: "overrides value via environment variables", + envEndpoint: "http://localhost:14252", + envUsername: "username", + envPassword: "password", + defaultCollectorEndpointOptions: collectorEndpointConfig{ + endpoint: "endpoint not to be used", + username: "foo", + password: "bar", + }, + expectedCollectorEndpointOptions: collectorEndpointConfig{ + endpoint: "http://localhost:14252", + username: "username", + password: "password", + }, + }, + { + name: "environment variables is empty, will not overwrite value", + envEndpoint: "", + envUsername: "", + envPassword: "", + defaultCollectorEndpointOptions: collectorEndpointConfig{ + endpoint: "endpoint to be used", + username: "foo", + password: "bar", + }, + expectedCollectorEndpointOptions: collectorEndpointConfig{ + endpoint: "endpoint to be used", + username: "foo", + password: "bar", + }, + }, + } + + envStore := ottest.NewEnvStore() + envStore.Record(envEndpoint) + envStore.Record(envUser) + envStore.Record(envPassword) + defer func() { + require.NoError(t, envStore.Restore()) + }() + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + require.NoError(t, os.Setenv(envEndpoint, tc.envEndpoint)) + require.NoError(t, os.Setenv(envUser, tc.envUsername)) + 
require.NoError(t, os.Setenv(envPassword, tc.envPassword)) + + endpoint := envOr(envEndpoint, tc.defaultCollectorEndpointOptions.endpoint) + username := envOr(envUser, tc.defaultCollectorEndpointOptions.username) + password := envOr(envPassword, tc.defaultCollectorEndpointOptions.password) + + assert.Equal(t, tc.expectedCollectorEndpointOptions.endpoint, endpoint) + assert.Equal(t, tc.expectedCollectorEndpointOptions.username, username) + assert.Equal(t, tc.expectedCollectorEndpointOptions.password, password) + }) + } +} diff --git a/exporters/jaeger/go.mod b/exporters/jaeger/go.mod new file mode 100644 index 00000000000..37d3da62047 --- /dev/null +++ b/exporters/jaeger/go.mod @@ -0,0 +1,75 @@ +module go.opentelemetry.io/otel/exporters/jaeger + +go 1.15 + +require ( + github.com/google/go-cmp v0.5.6 + github.com/stretchr/testify v1.7.0 + go.opentelemetry.io/otel v0.20.0 + go.opentelemetry.io/otel/sdk v0.20.0 + go.opentelemetry.io/otel/trace v0.20.0 +) + +replace go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus + +replace go.opentelemetry.io/otel/bridge/opentracing => ../../bridge/opentracing + +replace go.opentelemetry.io/otel/example/jaeger => ../../example/jaeger + +replace go.opentelemetry.io/otel/example/namedtracer => ../../example/namedtracer + +replace go.opentelemetry.io/otel/example/opencensus => ../../example/opencensus + +replace go.opentelemetry.io/otel/example/otel-collector => ../../example/otel-collector + +replace go.opentelemetry.io/otel/example/prom-collector => ../../example/prom-collector + +replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus + +replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin + +replace go.opentelemetry.io/otel/exporters/prometheus => ../prometheus + +replace go.opentelemetry.io/otel/exporters/otlp => ../otlp + +replace go.opentelemetry.io/otel/exporters/stdout => ../stdout + +replace go.opentelemetry.io/otel/exporters/jaeger => ./ + +replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools + +replace go.opentelemetry.io/otel/metric => ../../metric + +replace go.opentelemetry.io/otel/oteltest => ../../oteltest + +replace go.opentelemetry.io/otel/sdk/export/metric => ../../sdk/export/metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric + +replace go.opentelemetry.io/otel/trace => ../../trace + +replace go.opentelemetry.io/otel/example/passthrough => ../../example/passthrough + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../otlp/otlptrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlp/otlptrace/otlptracegrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlp/otlptrace/otlptracehttp + +replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric + +replace go.opentelemetry.io/otel => ../.. 
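For reviewers, a minimal sketch of how the new module is consumed, based only on the surface exercised in env_test.go above (New, WithCollectorEndpoint, WithEndpoint) and the OTEL_EXPORTER_JAEGER_* variables defined in env.go. The TracerProvider wiring through go.opentelemetry.io/otel/sdk/trace is an assumption about the surrounding SDK and is not shown in this patch:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel/exporters/jaeger"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        // With no explicit collector options (such as the WithEndpoint option
        // seen in the tests above), the endpoint and credentials fall back to
        // the OTEL_EXPORTER_JAEGER_* environment variables read via envOr in
        // env.go, and then to the built-in defaults.
        exp, err := jaeger.New(jaeger.WithCollectorEndpoint())
        if err != nil {
            log.Fatal(err)
        }

        // Assumed SDK wiring, not part of this diff: register the exporter
        // with a batching TracerProvider and flush it on shutdown.
        tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
        defer func() { _ = tp.Shutdown(context.Background()) }()
    }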
+ +replace go.opentelemetry.io/otel/exporters/zipkin => ../zipkin + +replace go.opentelemetry.io/otel/sdk => ../../sdk + +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../trace/zipkin + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/jaeger/go.sum b/exporters/jaeger/go.sum new file mode 100644 index 00000000000..e8ee16a18cd --- /dev/null +++ b/exporters/jaeger/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go new file mode 100644 index 00000000000..54cd3b0867a --- /dev/null +++ b/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +var GoUnusedProtection__ int; + diff --git a/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/exporters/jaeger/internal/gen-go/agent/agent-consts.go new file mode 100644 index 00000000000..3b96e3222ee --- /dev/null +++ b/exporters/jaeger/internal/gen-go/agent/agent-consts.go @@ -0,0 +1,27 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +func init() { +} diff --git a/exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go b/exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go new file mode 100755 index 00000000000..9ec0d40a865 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go @@ -0,0 +1,210 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package main + +import ( + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ +var _ = agent.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " void emitZipkinBatch( spans)") + fmt.Fprintln(os.Stderr, " void emitBatch(Batch batch)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. 
-H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := agent.NewAgentClient(thrift.NewTStandardClient(iprot, oprot)) + if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "emitZipkinBatch": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "EmitZipkinBatch requires 1 args") + flag.Usage() + } + arg5 := flag.Arg(1) + mbTrans6 := thrift.NewTMemoryBufferLen(len(arg5)) + defer mbTrans6.Close() + _, err7 := mbTrans6.WriteString(arg5) + if err7 != nil { + Usage() + return + } + factory8 := thrift.NewTJSONProtocolFactory() + jsProt9 := factory8.GetProtocol(mbTrans6) + containerStruct0 := agent.NewAgentEmitZipkinBatchArgs() + err10 := containerStruct0.ReadField1(context.Background(), jsProt9) + if err10 != nil { + Usage() + return + } + argvalue0 := containerStruct0.Spans + value0 := argvalue0 + fmt.Print(client.EmitZipkinBatch(context.Background(), value0)) + fmt.Print("\n") + break + case "emitBatch": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "EmitBatch requires 1 args") + flag.Usage() + } + arg11 := flag.Arg(1) + mbTrans12 := thrift.NewTMemoryBufferLen(len(arg11)) + defer mbTrans12.Close() + _, err13 := mbTrans12.WriteString(arg11) + if err13 != nil { + Usage() + return + } + factory14 := thrift.NewTJSONProtocolFactory() + jsProt15 := factory14.GetProtocol(mbTrans12) + argvalue0 := jaeger.NewBatch() + err16 := argvalue0.Read(context.Background(), jsProt15) + if err16 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.EmitBatch(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + 
break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/exporters/jaeger/internal/gen-go/agent/agent.go b/exporters/jaeger/internal/gen-go/agent/agent.go new file mode 100644 index 00000000000..c7c8e9ca3e6 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/agent/agent.go @@ -0,0 +1,412 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +type Agent interface { + // Parameters: + // - Spans + EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) + // Parameters: + // - Batch + EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) +} + +type AgentClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { + return &AgentClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { + return &AgentClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAgentClient(c thrift.TClient) *AgentClient { + return &AgentClient{ + c: c, + } +} + +func (p *AgentClient) Client_() thrift.TClient { + return p.c +} + +func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { + var _args0 AgentEmitZipkinBatchArgs + _args0.Spans = spans + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { + return err + } + return nil +} + +// Parameters: +// - Batch +func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) { + var _args1 AgentEmitBatchArgs + _args1.Batch = batch + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { + return err + } + return nil +} + +type AgentProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Agent +} + +func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAgentProcessor(handler Agent) *AgentProcessor { + + self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} + self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} + return self2 +} + 
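The generated AgentClient above is transport-agnostic: it only needs a thrift.TClient (or a TTransport plus a TProtocolFactory), which is how the exporter's UDP agent uploader can reuse it unchanged. Below is a small sketch, using only types present in this patch, of driving the one-way emitZipkinBatch call against an in-memory transport. Since gen-go and the vendored thrift are internal packages, code like this could only live inside this module (for example in a test); the memory buffer merely stands in for the real UDP socket, and emitExample is a hypothetical helper, not part of the patch:

    package jaeger // hypothetical placement inside the exporter package

    import (
        "context"
        "fmt"

        "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent"
        "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
        "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
    )

    func emitExample() error {
        // Any thrift.TTransport works; a memory buffer replaces the UDP socket here.
        buf := thrift.NewTMemoryBufferLen(1024)

        // Build the generated client from the transport and a protocol factory,
        // mirroring the factory constructor defined above.
        client := agent.NewAgentClientFactory(buf, thrift.NewTCompactProtocolFactory())

        // emitZipkinBatch is a one-way call: Call serializes the arguments and
        // flushes the transport without waiting for a reply.
        if err := client.EmitZipkinBatch(context.Background(), []*zipkincore.Span{}); err != nil {
            return err
        }
        fmt.Println("encoded message bytes:", buf.Len())
        return nil
    }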
+func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x3.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x3 + +} + +type agentProcessorEmitZipkinBatch struct { + handler Agent +} + +func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil +} + +type agentProcessorEmitBatch struct { + handler Agent +} + +func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type AgentEmitZipkinBatchArgs struct { + Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { + return &AgentEmitZipkinBatchArgs{} +} + +func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { + return p.Spans +} +func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := 
make([]*zipkincore.Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem4 := &zipkincore.Span{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Spans = append(p.Spans, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *AgentEmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Batch +type AgentEmitBatchArgs struct { + Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` +} + +func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { + return &AgentEmitBatchArgs{} +} + +var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch + +func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } + return p.Batch +} +func (p *AgentEmitBatchArgs) IsSetBatch() bool { + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) 
ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Batch = &jaeger.Batch{} + if err := p.Batch.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) + } + if err := p.Batch.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) + } + return err +} + +func (p *AgentEmitBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) +} diff --git a/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go new file mode 100644 index 00000000000..fe45a9f9ad2 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +var GoUnusedProtection__ int; + diff --git a/exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go b/exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go new file mode 100755 index 00000000000..cacf75d00f5 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go @@ -0,0 +1,180 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
+ +package main + +import ( + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +var _ = jaeger.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " submitBatches( batches)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + 
os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := jaeger.NewCollectorClient(thrift.NewTStandardClient(iprot, oprot)) + if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "submitBatches": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "SubmitBatches requires 1 args") + flag.Usage() + } + arg19 := flag.Arg(1) + mbTrans20 := thrift.NewTMemoryBufferLen(len(arg19)) + defer mbTrans20.Close() + _, err21 := mbTrans20.WriteString(arg19) + if err21 != nil { + Usage() + return + } + factory22 := thrift.NewTJSONProtocolFactory() + jsProt23 := factory22.GetProtocol(mbTrans20) + containerStruct0 := jaeger.NewCollectorSubmitBatchesArgs() + err24 := containerStruct0.ReadField1(context.Background(), jsProt23) + if err24 != nil { + Usage() + return + } + argvalue0 := containerStruct0.Batches + value0 := argvalue0 + fmt.Print(client.SubmitBatches(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go new file mode 100644 index 00000000000..10162857fbb --- /dev/null +++ b/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go @@ -0,0 +1,22 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +func init() { +} diff --git a/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/exporters/jaeger/internal/gen-go/jaeger/jaeger.go new file mode 100644 index 00000000000..b1fe26c57d9 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/jaeger/jaeger.go @@ -0,0 +1,3022 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type TagType int64 + +const ( + TagType_STRING TagType = 0 + TagType_DOUBLE TagType = 1 + TagType_BOOL TagType = 2 + TagType_LONG TagType = 3 + TagType_BINARY TagType = 4 +) + +func (p TagType) String() string { + switch p { + case TagType_STRING: + return "STRING" + case TagType_DOUBLE: + return "DOUBLE" + case TagType_BOOL: + return "BOOL" + case TagType_LONG: + return "LONG" + case TagType_BINARY: + return "BINARY" + } + return "" +} + +func TagTypeFromString(s string) (TagType, error) { + switch s { + case "STRING": + return TagType_STRING, nil + case "DOUBLE": + return TagType_DOUBLE, nil + case "BOOL": + return TagType_BOOL, nil + case "LONG": + return TagType_LONG, nil + case "BINARY": + return TagType_BINARY, nil + } + return TagType(0), fmt.Errorf("not a valid TagType string") +} + +func TagTypePtr(v TagType) *TagType { return &v } + +func (p TagType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *TagType) UnmarshalText(text []byte) error { + q, err := TagTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *TagType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = TagType(v) + return nil +} + +func (p *TagType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type SpanRefType int64 + +const ( + SpanRefType_CHILD_OF SpanRefType = 0 + SpanRefType_FOLLOWS_FROM SpanRefType = 1 +) + +func (p SpanRefType) String() string { + switch p { + case SpanRefType_CHILD_OF: + return "CHILD_OF" + case SpanRefType_FOLLOWS_FROM: + return "FOLLOWS_FROM" + } + return "" +} + +func SpanRefTypeFromString(s string) (SpanRefType, error) { + switch s { + case "CHILD_OF": + return SpanRefType_CHILD_OF, nil + case "FOLLOWS_FROM": + return SpanRefType_FOLLOWS_FROM, nil + } + return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") +} + +func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } + +func (p SpanRefType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SpanRefType) UnmarshalText(text []byte) error { + q, err := SpanRefTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *SpanRefType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = SpanRefType(v) + return nil +} + +func (p *SpanRefType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Key +// - VType +// - VStr +// - VDouble +// - VBool +// - VLong +// - VBinary +type Tag struct { + Key string `thrift:"key,1,required" db:"key" json:"key"` + VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` + VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` + VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` + VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` + VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` + VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"` +} + +func NewTag() *Tag { + return &Tag{} +} + +func (p *Tag) GetKey() string { + return p.Key +} + +func (p *Tag) GetVType() TagType { + return p.VType +} + +var Tag_VStr_DEFAULT string + +func (p *Tag) GetVStr() string { + if !p.IsSetVStr() { + 
return Tag_VStr_DEFAULT + } + return *p.VStr +} + +var Tag_VDouble_DEFAULT float64 + +func (p *Tag) GetVDouble() float64 { + if !p.IsSetVDouble() { + return Tag_VDouble_DEFAULT + } + return *p.VDouble +} + +var Tag_VBool_DEFAULT bool + +func (p *Tag) GetVBool() bool { + if !p.IsSetVBool() { + return Tag_VBool_DEFAULT + } + return *p.VBool +} + +var Tag_VLong_DEFAULT int64 + +func (p *Tag) GetVLong() int64 { + if !p.IsSetVLong() { + return Tag_VLong_DEFAULT + } + return *p.VLong +} + +var Tag_VBinary_DEFAULT []byte + +func (p *Tag) GetVBinary() []byte { + return p.VBinary +} +func (p *Tag) IsSetVStr() bool { + return p.VStr != nil +} + +func (p *Tag) IsSetVDouble() bool { + return p.VDouble != nil +} + +func (p *Tag) IsSetVBool() bool { + return p.VBool != nil +} + +func (p *Tag) IsSetVLong() bool { + return p.VLong != nil +} + +func (p *Tag) IsSetVBinary() bool { + return p.VBinary != nil +} + +func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetKey bool = false + var issetVType bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetKey = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetVType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetKey { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) + } + if !issetVType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) + } + return nil +} + +func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != 
nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := TagType(v) + p.VType = temp + } + return nil +} + +func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.VStr = &v + } + return nil +} + +func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.VDouble = &v + } + return nil +} + +func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.VBool = &v + } + return nil +} + +func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.VLong = &v + } + return nil +} + +func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.VBinary = v + } + return nil +} + +func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField7(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) + } + if err := 
oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) + } + return err +} + +func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVStr() { + if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) + } + if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) + } + } + return err +} + +func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVDouble() { + if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) + } + if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) + } + } + return err +} + +func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVBool() { + if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) + } + } + return err +} + +func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVLong() { + if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) + } + } + return err +} + +func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVBinary() { + if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) + } + } + return err +} + +func (p *Tag) Equals(other *Tag) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if p.VType != other.VType { + return false + } + if p.VStr != other.VStr { + if p.VStr == nil || other.VStr == nil { + return false + } + if (*p.VStr) != (*other.VStr) { + return false + } + } + if p.VDouble != 
other.VDouble { + if p.VDouble == nil || other.VDouble == nil { + return false + } + if (*p.VDouble) != (*other.VDouble) { + return false + } + } + if p.VBool != other.VBool { + if p.VBool == nil || other.VBool == nil { + return false + } + if (*p.VBool) != (*other.VBool) { + return false + } + } + if p.VLong != other.VLong { + if p.VLong == nil || other.VLong == nil { + return false + } + if (*p.VLong) != (*other.VLong) { + return false + } + } + if bytes.Compare(p.VBinary, other.VBinary) != 0 { + return false + } + return true +} + +func (p *Tag) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Tag(%+v)", *p) +} + +// Attributes: +// - Timestamp +// - Fields +type Log struct { + Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` + Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` +} + +func NewLog() *Log { + return &Log{} +} + +func (p *Log) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Log) GetFields() []*Tag { + return p.Fields +} +func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTimestamp bool = false + var issetFields bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTimestamp = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetFields = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTimestamp { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) + } + if !issetFields { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) + } + return nil +} + +func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Fields = tSlice + for i := 0; i < size; i++ { + _elem0 := &Tag{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Fields = append(p.Fields, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := 
oprot.WriteStructBegin(ctx, "Log"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Fields { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) + } + return err +} + +func (p *Log) Equals(other *Log) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if len(p.Fields) != len(other.Fields) { + return false + } + for i, _tgt := range p.Fields { + _src1 := other.Fields[i] + if !_tgt.Equals(_src1) { + return false + } + } + return true +} + +func (p *Log) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Log(%+v)", *p) +} + +// Attributes: +// - RefType +// - TraceIdLow +// - TraceIdHigh +// - SpanId +type SpanRef struct { + RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` + TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` +} + +func NewSpanRef() *SpanRef { + return &SpanRef{} +} + +func (p *SpanRef) GetRefType() SpanRefType { + return p.RefType +} + +func (p *SpanRef) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *SpanRef) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *SpanRef) GetSpanId() int64 { + return p.SpanId +} +func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetRefType bool = false + var issetTraceIdLow bool = false + var issetTraceIdHigh bool = false + var 
issetSpanId bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetRefType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTraceIdLow = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetTraceIdHigh = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetRefType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) + } + if !issetTraceIdLow { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) + } + if !issetTraceIdHigh { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) + } + if !issetSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) + } + return nil +} + +func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := SpanRefType(v) + p.RefType = temp + } + return nil +} + +func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TraceIdLow = v + } + return nil +} + +func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TraceIdHigh = v + } + return nil +} + +func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.SpanId = v + } + return nil +} + +func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return 
err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) + } + return err +} + +func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) + } + return err +} + +func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) + } + return err +} + +func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) + } + return err +} + +func (p *SpanRef) Equals(other *SpanRef) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.RefType != other.RefType { + return false + } + if p.TraceIdLow != other.TraceIdLow { + return false + } + if p.TraceIdHigh != other.TraceIdHigh { + return false + } + if p.SpanId != other.SpanId { + return false + } + return true +} + +func (p *SpanRef) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SpanRef(%+v)", *p) +} + +// Attributes: +// - TraceIdLow +// - TraceIdHigh +// - SpanId +// - ParentSpanId +// - OperationName +// - References +// - Flags +// - StartTime +// - Duration +// - Tags +// - Logs +type Span struct { + TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,3,required" 
db:"spanId" json:"spanId"` + ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` + OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` + References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` + Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` + StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` + Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` + Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` + Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *Span) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *Span) GetSpanId() int64 { + return p.SpanId +} + +func (p *Span) GetParentSpanId() int64 { + return p.ParentSpanId +} + +func (p *Span) GetOperationName() string { + return p.OperationName +} + +var Span_References_DEFAULT []*SpanRef + +func (p *Span) GetReferences() []*SpanRef { + return p.References +} + +func (p *Span) GetFlags() int32 { + return p.Flags +} + +func (p *Span) GetStartTime() int64 { + return p.StartTime +} + +func (p *Span) GetDuration() int64 { + return p.Duration +} + +var Span_Tags_DEFAULT []*Tag + +func (p *Span) GetTags() []*Tag { + return p.Tags +} + +var Span_Logs_DEFAULT []*Log + +func (p *Span) GetLogs() []*Log { + return p.Logs +} +func (p *Span) IsSetReferences() bool { + return p.References != nil +} + +func (p *Span) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Span) IsSetLogs() bool { + return p.Logs != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTraceIdLow bool = false + var issetTraceIdHigh bool = false + var issetSpanId bool = false + var issetParentSpanId bool = false + var issetOperationName bool = false + var issetFlags bool = false + var issetStartTime bool = false + var issetDuration bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTraceIdLow = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTraceIdHigh = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetParentSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + issetOperationName = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { 
+ return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + issetFlags = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I64 { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + issetStartTime = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I64 { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + issetDuration = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.LIST { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.LIST { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTraceIdLow { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) + } + if !issetTraceIdHigh { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) + } + if !issetSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) + } + if !issetParentSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) + } + if !issetOperationName { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) + } + if !issetFlags { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) + } + if !issetStartTime { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) + } + if !issetDuration { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceIdLow = v + } + return nil +} + +func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TraceIdHigh = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SpanId = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) 
error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ParentSpanId = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.OperationName = v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*SpanRef, 0, size) + p.References = tSlice + for i := 0; i < size; i++ { + _elem2 := &SpanRef{} + if err := _elem2.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.References = append(p.References, _elem2) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.Flags = v + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Duration = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem3 := &Tag{} + if err := _elem3.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Tags = append(p.Tags, _elem3) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Log, 0, size) + p.Logs = tSlice + for i := 0; i < size; i++ { + _elem4 := &Log{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Logs = append(p.Logs, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := 
p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField7(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) + } + return err +} + +func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), 
err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetReferences() { + if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.References { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) + } + } + return err +} + +func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := 
oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetLogs() { + if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Logs { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceIdLow != other.TraceIdLow { + return false + } + if p.TraceIdHigh != other.TraceIdHigh { + return false + } + if p.SpanId != other.SpanId { + return false + } + if p.ParentSpanId != other.ParentSpanId { + return false + } + if p.OperationName != other.OperationName { + return false + } + if len(p.References) != len(other.References) { + return false + } + for i, _tgt := range p.References { + _src5 := other.References[i] + if !_tgt.Equals(_src5) { + return false + } + } + if p.Flags != other.Flags { + return false + } + if p.StartTime != other.StartTime { + return false + } + if p.Duration != other.Duration { + return false + } + if len(p.Tags) != len(other.Tags) { + return false + } + for i, _tgt := range p.Tags { + _src6 := other.Tags[i] + if !_tgt.Equals(_src6) { + return false + } + } + if len(p.Logs) != len(other.Logs) { + return false + } + for i, _tgt := range p.Logs { + _src7 := other.Logs[i] + if !_tgt.Equals(_src7) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - ServiceName +// - Tags +type Process struct { + ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` + Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` +} + +func NewProcess() *Process { + return &Process{} +} + +func (p *Process) GetServiceName() string { + return p.ServiceName +} + +var Process_Tags_DEFAULT []*Tag + +func (p *Process) GetTags() []*Tag { + return p.Tags +} +func (p *Process) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetServiceName bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetServiceName = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := 
p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetServiceName { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) + } + return nil +} + +func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem8 := &Tag{} + if err := _elem8.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) + } + p.Tags = append(p.Tags, _elem8) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) + } + return err +} + +func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 
2:tags: ", p), err) + } + } + return err +} + +func (p *Process) Equals(other *Process) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if len(p.Tags) != len(other.Tags) { + return false + } + for i, _tgt := range p.Tags { + _src9 := other.Tags[i] + if !_tgt.Equals(_src9) { + return false + } + } + return true +} + +func (p *Process) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Process(%+v)", *p) +} + +// Attributes: +// - FullQueueDroppedSpans +// - TooLargeDroppedSpans +// - FailedToEmitSpans +type ClientStats struct { + FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` + TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` + FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` +} + +func NewClientStats() *ClientStats { + return &ClientStats{} +} + +func (p *ClientStats) GetFullQueueDroppedSpans() int64 { + return p.FullQueueDroppedSpans +} + +func (p *ClientStats) GetTooLargeDroppedSpans() int64 { + return p.TooLargeDroppedSpans +} + +func (p *ClientStats) GetFailedToEmitSpans() int64 { + return p.FailedToEmitSpans +} +func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetFullQueueDroppedSpans bool = false + var issetTooLargeDroppedSpans bool = false + var issetFailedToEmitSpans bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetFullQueueDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTooLargeDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetFailedToEmitSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetFullQueueDroppedSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set")) + } + if !issetTooLargeDroppedSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")) + } + if !issetFailedToEmitSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")) + } + return nil +} + +func (p *ClientStats) ReadField1(ctx context.Context, iprot 
thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.FullQueueDroppedSpans = v + } + return nil +} + +func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TooLargeDroppedSpans = v + } + return nil +} + +func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.FailedToEmitSpans = v + } + return nil +} + +func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) + } + return err +} + +func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) + } + return err +} + +func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) + } + return err +} + +func (p *ClientStats) Equals(other *ClientStats) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if 
p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { + return false + } + if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { + return false + } + if p.FailedToEmitSpans != other.FailedToEmitSpans { + return false + } + return true +} + +func (p *ClientStats) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClientStats(%+v)", *p) +} + +// Attributes: +// - Process +// - Spans +// - SeqNo +// - Stats +type Batch struct { + Process *Process `thrift:"process,1,required" db:"process" json:"process"` + Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` + SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` + Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` +} + +func NewBatch() *Batch { + return &Batch{} +} + +var Batch_Process_DEFAULT *Process + +func (p *Batch) GetProcess() *Process { + if !p.IsSetProcess() { + return Batch_Process_DEFAULT + } + return p.Process +} + +func (p *Batch) GetSpans() []*Span { + return p.Spans +} + +var Batch_SeqNo_DEFAULT int64 + +func (p *Batch) GetSeqNo() int64 { + if !p.IsSetSeqNo() { + return Batch_SeqNo_DEFAULT + } + return *p.SeqNo +} + +var Batch_Stats_DEFAULT *ClientStats + +func (p *Batch) GetStats() *ClientStats { + if !p.IsSetStats() { + return Batch_Stats_DEFAULT + } + return p.Stats +} +func (p *Batch) IsSetProcess() bool { + return p.Process != nil +} + +func (p *Batch) IsSetSeqNo() bool { + return p.SeqNo != nil +} + +func (p *Batch) IsSetStats() bool { + return p.Stats != nil +} + +func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetProcess bool = false + var issetSpans bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetProcess = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetProcess { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) + } + if !issetSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) + } + return nil +} + +func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + 
p.Process = &Process{} + if err := p.Process.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) + } + return nil +} + +func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem10 := &Span{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Spans = append(p.Spans, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SeqNo = &v + } + return nil +} + +func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Stats = &ClientStats{} + if err := p.Stats.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) + } + return nil +} + +func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) + } + if err := p.Process.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) + } + return err +} + +func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) + } + return err +} + +func (p *Batch) writeField3(ctx 
context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSeqNo() { + if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) + } + } + return err +} + +func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStats() { + if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) + } + if err := p.Stats.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) + } + } + return err +} + +func (p *Batch) Equals(other *Batch) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Process.Equals(other.Process) { + return false + } + if len(p.Spans) != len(other.Spans) { + return false + } + for i, _tgt := range p.Spans { + _src11 := other.Spans[i] + if !_tgt.Equals(_src11) { + return false + } + } + if p.SeqNo != other.SeqNo { + if p.SeqNo == nil || other.SeqNo == nil { + return false + } + if (*p.SeqNo) != (*other.SeqNo) { + return false + } + } + if !p.Stats.Equals(other.Stats) { + return false + } + return true +} + +func (p *Batch) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Batch(%+v)", *p) +} + +// Attributes: +// - Ok +type BatchSubmitResponse struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewBatchSubmitResponse() *BatchSubmitResponse { + return &BatchSubmitResponse{} +} + +func (p *BatchSubmitResponse) GetOk() bool { + return p.Ok +} +func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p 
*BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { + return false + } + return true +} + +func (p *BatchSubmitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) +} + +type Collector interface { + // Parameters: + // - Batches + SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) +} + +type CollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { + return &CollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { + return &CollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewCollectorClient(c thrift.TClient) *CollectorClient { + return &CollectorClient{ + c: c, + } +} + +func (p *CollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Batches +func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { + var _args12 CollectorSubmitBatchesArgs + _args12.Batches = batches + var _result14 CollectorSubmitBatchesResult + var _meta13 thrift.ResponseMeta + _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14) + p.SetLastResponseMeta_(_meta13) + if _err != nil { + return + } + return _result14.GetSuccess(), nil +} + +type CollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Collector +} + +func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return 
p.processorMap +} + +func NewCollectorProcessor(handler Collector) *CollectorProcessor { + + self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} + return self15 +} + +func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x16.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x16 + +} + +type collectorProcessorSubmitBatches struct { + handler Collector +} + +func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := CollectorSubmitBatchesArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := CollectorSubmitBatchesResult{} + var retval []*BatchSubmitResponse + if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Batches +type CollectorSubmitBatchesArgs struct { + 
Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` +} + +func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { + return &CollectorSubmitBatchesArgs{} +} + +func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { + return p.Batches +} +func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Batch, 0, size) + p.Batches = tSlice + for i := 0; i < size; i++ { + _elem17 := &Batch{} + if err := _elem17.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) + } + p.Batches = append(p.Batches, _elem17) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Batches { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) + } + return err +} + +func (p *CollectorSubmitBatchesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) +} + +// 
Attributes: +// - Success +type CollectorSubmitBatchesResult struct { + Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult { + return &CollectorSubmitBatchesResult{} +} + +var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse + +func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { + return p.Success +} +func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BatchSubmitResponse, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem18 := &BatchSubmitResponse{} + if err := _elem18.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) + } + p.Success = append(p.Success, _elem18) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing 
list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *CollectorSubmitBatchesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p) +} diff --git a/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go new file mode 100644 index 00000000000..ebf43018fe7 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +var GoUnusedProtection__ int; + diff --git a/exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go b/exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go new file mode 100755 index 00000000000..127f67d05a1 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go @@ -0,0 +1,180 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package main + +import ( + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +var _ = zipkincore.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " submitZipkinBatch( spans)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. 
-H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := zipkincore.NewZipkinCollectorClient(thrift.NewTStandardClient(iprot, oprot)) + if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "submitZipkinBatch": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "SubmitZipkinBatch requires 1 args") + flag.Usage() + } + arg11 := flag.Arg(1) + mbTrans12 := thrift.NewTMemoryBufferLen(len(arg11)) + defer mbTrans12.Close() + _, err13 := mbTrans12.WriteString(arg11) + if err13 != nil { + Usage() + return + } + factory14 := thrift.NewTJSONProtocolFactory() + jsProt15 := factory14.GetProtocol(mbTrans12) + containerStruct0 := zipkincore.NewZipkinCollectorSubmitZipkinBatchArgs() + err16 := containerStruct0.ReadField1(context.Background(), jsProt15) + if err16 != nil { + Usage() + return + } + argvalue0 := containerStruct0.Spans + value0 := argvalue0 + fmt.Print(client.SubmitZipkinBatch(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go new file mode 100644 index 00000000000..043ecba9626 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go @@ -0,0 +1,39 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
+ +package zipkincore + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const LOCAL_COMPONENT = "lc" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} diff --git a/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go new file mode 100644 index 00000000000..7f46810e0d3 --- /dev/null +++ b/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go @@ -0,0 +1,2067 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Indicates the network context of a service recording an annotation with two +// exceptions. 
+// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// +// Conventionally, when the port isn't known, port = 0. +// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} 
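[Editor's note, not part of the patch: the generated Endpoint doc comment above describes packing an IPv4 address into the int32 Ipv4 field as (1 << 24) | (2 << 16) | (3 << 8) | 4 for 1.2.3.4. The following is a minimal, hypothetical Go sketch of that packing; the helper name packIPv4 is illustrative only and does not appear in the generated code.]

    package main

    import (
    	"fmt"
    	"net"
    )

    // packIPv4 packs a 4-byte IPv4 address into the big-endian int32 layout
    // described by the generated Endpoint.Ipv4 field comment.
    func packIPv4(ip net.IP) int32 {
    	v4 := ip.To4() // assumes a valid IPv4 address; a real caller would check for nil
    	return int32(uint32(v4[0])<<24 | uint32(v4[1])<<16 | uint32(v4[2])<<8 | uint32(v4[3]))
    }

    func main() {
    	// For 1.2.3.4 this prints 16909060, i.e. (1<<24)|(2<<16)|(3<<8)|4.
    	fmt.Println(packIPv4(net.ParseIP("1.2.3.4")))
    }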
+ +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// An annotation is similar to a log statement. It includes a host field which +// allows these events to be attributed properly, and also aggregatable. +// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. +// - Value +// - Host: Always the host that recorded the event. By specifying the host you allow +// rollup of all events (such as client requests to a service) by IP address. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + 
return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of "http.uri" could the path to a resource in a +// RPC call. 
+// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.uri" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key +// - Value +// - AnnotationType +// - Host: The host that recorded tag, which allows you to differentiate between +// multiple tags with the same key. There are two exceptions to this. +// +// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, or clients such as web browsers. +type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) 
ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write 
field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } + if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// The root span is where trace_id = id and parent_id = Nil. The root span is +// usually the longest interval in the trace, starting with a SERVER_RECV +// annotation and ending with a SERVER_SEND. +// +// Attributes: +// - TraceID +// - Name: Span name in lowercase, rpc method for example +// +// Conventionally, when the span name isn't known, name = "unknown". +// - ID +// - ParentID +// - Annotations +// - BinaryAnnotations +// - Debug +// - Timestamp: Microseconds from epoch of the creation of this span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// This field is optional for compatibility with old data: first-party span +// stores are expected to support this at time of introduction. +// - Duration: Measurement of duration in microseconds, used to support queries. +// +// This value should be set directly, where possible. Doing so encourages +// precise measurement decoupled from problems of clocks, such as skew or NTP +// updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. 
+type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, 
iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { + return false + } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - Ok +type Response struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewResponse() *Response { + return &Response{} +} + +func (p *Response) GetOk() bool { + return p.Ok +} +func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *Response) Equals(other *Response) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { + return false + } + return true +} + +func (p *Response) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Response(%+v)", *p) +} + +type ZipkinCollector interface { + // Parameters: + // - Spans + SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) +} + +type ZipkinCollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func 
NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: c, + } +} + +func (p *ZipkinCollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) { + var _args4 ZipkinCollectorSubmitZipkinBatchArgs + _args4.Spans = spans + var _result6 ZipkinCollectorSubmitZipkinBatchResult + var _meta5 thrift.ResponseMeta + _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) + p.SetLastResponseMeta_(_meta5) + if _err != nil { + return + } + return _result6.GetSuccess(), nil +} + +type ZipkinCollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ZipkinCollector +} + +func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { + + self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} + return self7 +} + +func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x8.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x8 + +} + +type zipkinCollectorProcessorSubmitZipkinBatch struct { + handler ZipkinCollector +} + +func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ZipkinCollectorSubmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := ZipkinCollectorSubmitZipkinBatchResult{} + var retval []*Response + if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type ZipkinCollectorSubmitZipkinBatchArgs struct { + Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { + return &ZipkinCollectorSubmitZipkinBatchArgs{} +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { + return p.Spans +} +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem9 := &Span{} + if err := _elem9.Read(ctx, iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.Spans = append(p.Spans, _elem9) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ZipkinCollectorSubmitZipkinBatchResult struct { + Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { + return &ZipkinCollectorSubmitZipkinBatchResult{} +} + +var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response + +func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { + return p.Success +} +func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p 
*ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Response, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem10 := &Response{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Success = append(p.Success, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) +} diff --git a/exporters/jaeger/internal/third_party/thrift/LICENSE b/exporters/jaeger/internal/third_party/thrift/LICENSE new file mode 100644 index 00000000000..2bc6fbbf65c --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/LICENSE @@ -0,0 +1,306 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. 
+ +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. 
+ +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- diff --git a/exporters/jaeger/internal/third_party/thrift/NOTICE b/exporters/jaeger/internal/third_party/thrift/NOTICE new file mode 100644 index 00000000000..37824e7fb66 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/NOTICE @@ -0,0 +1,5 @@ +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go new file mode 100644 index 00000000000..32d5b0147a2 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" +) + +const ( + UNKNOWN_APPLICATION_EXCEPTION = 0 + UNKNOWN_METHOD = 1 + INVALID_MESSAGE_TYPE_EXCEPTION = 2 + WRONG_METHOD_NAME = 3 + BAD_SEQUENCE_ID = 4 + MISSING_RESULT = 5 + INTERNAL_ERROR = 6 + PROTOCOL_ERROR = 7 + INVALID_TRANSFORM = 8 + INVALID_PROTOCOL = 9 + UNSUPPORTED_CLIENT_TYPE = 10 +) + +var defaultApplicationExceptionMessage = map[int32]string{ + UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", + UNKNOWN_METHOD: "unknown method", + INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", + WRONG_METHOD_NAME: "wrong method name", + BAD_SEQUENCE_ID: "bad sequence ID", + MISSING_RESULT: "missing result", + INTERNAL_ERROR: "unknown internal error", + PROTOCOL_ERROR: "unknown protocol error", + INVALID_TRANSFORM: "Invalid transform", + INVALID_PROTOCOL: "Invalid protocol", + UNSUPPORTED_CLIENT_TYPE: "Unsupported client type", +} + +// Application level Thrift exception +type TApplicationException interface { + TException + TypeId() int32 + Read(ctx context.Context, iprot TProtocol) error + Write(ctx context.Context, oprot TProtocol) error +} + +type tApplicationException struct { + message string + type_ int32 +} + +var _ TApplicationException = (*tApplicationException)(nil) + +func (tApplicationException) TExceptionType() TExceptionType { + return TExceptionTypeApplication +} + +func (e tApplicationException) Error() string { + if e.message != "" { + return e.message + } + return defaultApplicationExceptionMessage[e.type_] +} + +func NewTApplicationException(type_ int32, message string) TApplicationException { + return &tApplicationException{message, type_} +} + +func (p *tApplicationException) TypeId() int32 { + return p.type_ +} + +func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { + // TODO: this should really be generated by the compiler + _, err := iprot.ReadStructBegin(ctx) + if err != nil { + return err + } + + message := "" + type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) + + for { + _, ttype, id, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return err + } + if ttype == STOP { + break + } + switch id { + case 1: + if ttype == STRING { + if message, err = iprot.ReadString(ctx); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + case 2: + if ttype == I32 { + if type_, err = iprot.ReadI32(ctx); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + default: + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + if err = iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return err + } + + p.message = message + p.type_ = type_ + + return nil +} + +func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { + err = oprot.WriteStructBegin(ctx, "TApplicationException") + if len(p.Error()) > 0 { + err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) + if err != nil { + return + } + err = oprot.WriteString(ctx, p.Error()) + if err != nil { + return + } + err = oprot.WriteFieldEnd(ctx) + if err != nil { + return + } + } + err = oprot.WriteFieldBegin(ctx, "type", I32, 2) + if err != nil { + return + } + err = oprot.WriteI32(ctx, p.type_) + if err != nil { + return + } + err = oprot.WriteFieldEnd(ctx) + if err != nil { + return + } + err = oprot.WriteFieldStop(ctx) + if err != nil { + return + } + err = 
oprot.WriteStructEnd(ctx) + return +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go new file mode 100644 index 00000000000..45c880d32f8 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go @@ -0,0 +1,555 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + cfg *TConfiguration + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTBinaryProtocolConf instead. +func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolConf instead. +func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { + PropagateTConfiguration(t, conf) + p := &TBinaryProtocol{ + origTransport: t, + cfg: conf, + } + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + return p +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. 
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{ + cfg: conf, + } +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocolConf(t, p.cfg) +} + +func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +/** + * Writing Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + if p.cfg.GetTBinaryStrictWrite() { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(ctx, int32(version)) + if e != nil { + return e + } + e = p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } else { + e := p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } + return nil +} + +func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + e := p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI16(ctx, id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { + e := p.WriteByte(ctx, STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + e := p.WriteByte(ctx, int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { + if value { + return p.WriteByte(ctx, 1) + } + return p.WriteByte(ctx, 0) +} + +func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { + v := p.buffer[0:4] + 
binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.trans.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.WriteI64(ctx, int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.Write(value) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.cfg.GetTBinaryStrictRead() { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte(ctx) + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32(ctx) + if e4 != nil { + return name, typeId, seqId, e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte(ctx) + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16(ctx) + } + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { + return nil +} + +var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) + +func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { + k, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return kType, vType, size, nil +} + +func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error 
{ + return nil +} + +func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { + b, e := p.ReadByte(ctx) + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(ctx, buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(ctx, buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", e + } + err = checkSizeForProtocol(size, p.cfg) + if err != nil { + return + } + if size < 0 { + err = invalidDataLength + return + } + if size == 0 { + return "", nil + } + if size < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:size] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + size, e := p.ReadI32(ctx) + if e != nil { + return nil, e + } + if err := checkSizeForProtocol(size, p.cfg); err != nil { + return nil, err + } + + buf, err := safeReadBytes(size, p.trans) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TBinaryProtocol) Transport() TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { + var read int + _, deadlineSet := ctx.Deadline() + for { + read, err = io.ReadFull(p.trans, buf) + if deadlineSet && read == 0 && 
isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout without anything read, + // and we still have time left, keep retrying. + continue + } + // For anything else, don't retry + break + } + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + buf, err := safeReadBytes(size, p.trans) + return string(buf), NewTProtocolException(err) +} + +func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) + _ TConfigurationSetter = (*TBinaryProtocol)(nil) +) + +// This function is shared between TBinaryProtocol and TCompactProtocol. +// +// It tries to read size bytes from trans, in a way that prevents large +// allocations when size is insanely large (mostly caused by malformed message). +func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { + if size < 0 { + return nil, nil + } + + buf := new(bytes.Buffer) + _, err := io.CopyN(buf, trans, int64(size)) + return buf.Bytes(), err +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go new file mode 100644 index 00000000000..aa551b4ab37 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bufio" + "context" +) + +type TBufferedTransportFactory struct { + size int +} + +type TBufferedTransport struct { + bufio.ReadWriter + tp TTransport +} + +func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return NewTBufferedTransport(trans, p.size), nil +} + +func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { + return &TBufferedTransportFactory{size: bufferSize} +} + +func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { + return &TBufferedTransport{ + ReadWriter: bufio.ReadWriter{ + Reader: bufio.NewReaderSize(trans, bufferSize), + Writer: bufio.NewWriterSize(trans, bufferSize), + }, + tp: trans, + } +} + +func (p *TBufferedTransport) IsOpen() bool { + return p.tp.IsOpen() +} + +func (p *TBufferedTransport) Open() (err error) { + return p.tp.Open() +} + +func (p *TBufferedTransport) Close() (err error) { + return p.tp.Close() +} + +func (p *TBufferedTransport) Read(b []byte) (int, error) { + n, err := p.ReadWriter.Read(b) + if err != nil { + p.ReadWriter.Reader.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Write(b []byte) (int, error) { + n, err := p.ReadWriter.Write(b) + if err != nil { + p.ReadWriter.Writer.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Flush(ctx context.Context) error { + if err := p.ReadWriter.Flush(); err != nil { + p.ReadWriter.Writer.Reset(p.tp) + return err + } + return p.tp.Flush(ctx) +} + +func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { + return p.tp.RemainingBytes() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.tp, conf) +} + +var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go new file mode 100644 index 00000000000..ea2c01fdadb --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go @@ -0,0 +1,109 @@ +package thrift + +import ( + "context" + "fmt" +) + +// ResponseMeta represents the metadata attached to the response. +type ResponseMeta struct { + // The headers in the response, if any. + // If the underlying transport/protocol is not THeader, this will always be nil. + Headers THeaderMap +} + +type TClient interface { + Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +type TStandardClient struct { + seqId int32 + iprot, oprot TProtocol +} + +// TStandardClient implements TClient, and uses the standard message format for Thrift. +// It is not safe for concurrent use. 
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { + return &TStandardClient{ + iprot: inputProtocol, + oprot: outputProtocol, + } +} + +func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { + // Set headers from context object on THeaderProtocol + if headerProt, ok := oprot.(*THeaderProtocol); ok { + headerProt.ClearWriteHeaders() + for _, key := range GetWriteHeaderList(ctx) { + if value, ok := GetHeader(ctx, key); ok { + headerProt.SetWriteHeader(key, value) + } + } + } + + if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { + return err + } + if err := args.Write(ctx, oprot); err != nil { + return err + } + if err := oprot.WriteMessageEnd(ctx); err != nil { + return err + } + return oprot.Flush(ctx) +} + +func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { + rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) + if err != nil { + return err + } + + if method != rMethod { + return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) + } else if seqId != rSeqId { + return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) + } else if rTypeId == EXCEPTION { + var exception tApplicationException + if err := exception.Read(ctx, iprot); err != nil { + return err + } + + if err := iprot.ReadMessageEnd(ctx); err != nil { + return err + } + + return &exception + } else if rTypeId != REPLY { + return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) + } + + if err := result.Read(ctx, iprot); err != nil { + return err + } + + return iprot.ReadMessageEnd(ctx) +} + +func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + p.seqId++ + seqId := p.seqId + + if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { + return ResponseMeta{}, err + } + + // method is oneway + if result == nil { + return ResponseMeta{}, nil + } + + err := p.Recv(ctx, p.iprot, seqId, method, result) + var headers THeaderMap + if hp, ok := p.iprot.(*THeaderProtocol); ok { + headers = hp.transport.readHeaders + } + return ResponseMeta{ + Headers: headers, + }, err +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go new file mode 100644 index 00000000000..a49225dabfb --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go @@ -0,0 +1,865 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +const ( + COMPACT_PROTOCOL_ID = 0x082 + COMPACT_VERSION = 1 + COMPACT_VERSION_MASK = 0x1f + COMPACT_TYPE_MASK = 0x0E0 + COMPACT_TYPE_BITS = 0x07 + COMPACT_TYPE_SHIFT_AMOUNT = 5 +) + +type tCompactType byte + +const ( + COMPACT_BOOLEAN_TRUE = 0x01 + COMPACT_BOOLEAN_FALSE = 0x02 + COMPACT_BYTE = 0x03 + COMPACT_I16 = 0x04 + COMPACT_I32 = 0x05 + COMPACT_I64 = 0x06 + COMPACT_DOUBLE = 0x07 + COMPACT_BINARY = 0x08 + COMPACT_LIST = 0x09 + COMPACT_SET = 0x0A + COMPACT_MAP = 0x0B + COMPACT_STRUCT = 0x0C +) + +var ( + ttypeToCompactType map[TType]tCompactType +) + +func init() { + ttypeToCompactType = map[TType]tCompactType{ + STOP: STOP, + BOOL: COMPACT_BOOLEAN_TRUE, + BYTE: COMPACT_BYTE, + I16: COMPACT_I16, + I32: COMPACT_I32, + I64: COMPACT_I64, + DOUBLE: COMPACT_DOUBLE, + STRING: COMPACT_BINARY, + LIST: COMPACT_LIST, + SET: COMPACT_SET, + MAP: COMPACT_MAP, + STRUCT: COMPACT_STRUCT, + } +} + +type TCompactProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTCompactProtocolFactoryConf instead. +func NewTCompactProtocolFactory() *TCompactProtocolFactory { + return NewTCompactProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { + return &TCompactProtocolFactory{ + cfg: conf, + } +} + +func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTCompactProtocolConf(trans, p.cfg) +} + +func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +type TCompactProtocol struct { + trans TRichTransport + origTransport TTransport + + cfg *TConfiguration + + // Used to keep track of the last field for the current and previous structs, + // so we can do the delta stuff. + lastField []int + lastFieldId int + + // If we encounter a boolean field begin, save the TField here so it can + // have the value incorporated. + booleanFieldName string + booleanFieldId int16 + booleanFieldPending bool + + // If we read a field header, and it's a boolean field, save the boolean + // value here so that readBool can use it. + boolValue bool + boolValueIsNotNull bool + buffer [64]byte +} + +// Deprecated: Use NewTCompactProtocolConf instead. +func NewTCompactProtocol(trans TTransport) *TCompactProtocol { + return NewTCompactProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { + PropagateTConfiguration(trans, conf) + p := &TCompactProtocol{ + origTransport: trans, + cfg: conf, + } + if et, ok := trans.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(trans) + } + + return p +} + +// +// Public Writing methods. +// + +// Write a message header to the wire. Compact Protocol messages contain the +// protocol version so we can migrate forwards in the future if need be. 
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := p.writeByteDirect(COMPACT_PROTOCOL_ID) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) + if err != nil { + return NewTProtocolException(err) + } + _, err = p.writeVarint32(seqid) + if err != nil { + return NewTProtocolException(err) + } + e := p.WriteString(ctx, name) + return e + +} + +func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } + +// Write a struct begin. This doesn't actually put anything on the wire. We +// use it as an opportunity to put special placeholder markers on the field +// stack so we can get the field id deltas correct. +func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return nil +} + +// Write a struct end. This doesn't actually put anything on the wire. We use +// this as an opportunity to pop the last field from the current struct off +// of the field stack. +func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) + } + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if typeId == BOOL { + // we want to possibly include the value, so we'll wait. + p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true + return nil + } + _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) + return NewTProtocolException(err) +} + +// The workhorse of writeFieldBegin. It has the option of doing a +// 'type override' of the type header. This is used specifically in the +// boolean field case. +func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { + // short lastField = lastField_.pop(); + + // if there's a type override, use that. 
+ var typeToWrite byte + if typeOverride == 0xFF { + typeToWrite = byte(p.getCompactType(typeId)) + } else { + typeToWrite = typeOverride + } + // check if we can use delta encoding for the field id + fieldId := int(id) + written := 0 + if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { + // write them together + err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) + if err != nil { + return 0, err + } + } else { + // write them separate + err := p.writeByteDirect(typeToWrite) + if err != nil { + return 0, err + } + err = p.WriteI16(ctx, id) + written = 1 + 2 + if err != nil { + return 0, err + } + } + + p.lastFieldId = fieldId + return written, nil +} + +func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } + +func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { + err := p.writeByteDirect(STOP) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if size == 0 { + err := p.writeByteDirect(0) + return NewTProtocolException(err) + } + _, err := p.writeVarint32(int32(size)) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } + +// Write a list header. +func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } + +// Write a set header. +func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } + +func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { + v := byte(COMPACT_BOOLEAN_FALSE) + if value { + v = byte(COMPACT_BOOLEAN_TRUE) + } + if p.booleanFieldPending { + // we haven't written the field header yet + _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) + p.booleanFieldPending = false + return NewTProtocolException(err) + } + // we're not part of a field, so just write the value. + err := p.writeByteDirect(v) + return NewTProtocolException(err) +} + +// Write a byte. Nothing to see here! +func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { + err := p.writeByteDirect(byte(value)) + return NewTProtocolException(err) +} + +// Write an I16 as a zigzag varint. +func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { + _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) + return NewTProtocolException(err) +} + +// Write an i32 as a zigzag varint. +func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { + _, err := p.writeVarint32(p.int32ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write an i64 as a zigzag varint. +func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { + _, err := p.writeVarint64(p.int64ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write a double to the wire as 8 bytes. 
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { + buf := p.buffer[0:8] + binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) + _, err := p.trans.Write(buf) + return NewTProtocolException(err) +} + +// Write a string to the wire with a varint size preceding. +func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { + _, e := p.writeVarint32(int32(len(value))) + if e != nil { + return NewTProtocolException(e) + } + if len(value) > 0 { + } + _, e = p.trans.WriteString(value) + return e +} + +// Write a byte array, using a varint for the size. +func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { + _, e := p.writeVarint32(int32(len(bin))) + if e != nil { + return NewTProtocolException(e) + } + if len(bin) > 0 { + _, e = p.trans.Write(bin) + return NewTProtocolException(e) + } + return nil +} + +// +// Reading methods. +// + +// Read a message header. +func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + var protocolId byte + + _, deadlineSet := ctx.Deadline() + for { + protocolId, err = p.readByteDirect() + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // keep retrying I/O timeout errors since we still have + // time left + continue + } + // For anything else, don't retry + break + } + if err != nil { + return + } + + if protocolId != COMPACT_PROTOCOL_ID { + e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) + return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) + } + + versionAndType, err := p.readByteDirect() + if err != nil { + return + } + + version := versionAndType & COMPACT_VERSION_MASK + typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) + if version != COMPACT_VERSION { + e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) + err = NewTProtocolExceptionWithType(BAD_VERSION, e) + return + } + seqId, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + name, err = p.ReadString(ctx) + return +} + +func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } + +// Read a struct begin. There's nothing on the wire for this, but it is our +// opportunity to push a new struct begin marker onto the field stack. +func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return +} + +// Doesn't actually consume any wire data, just removes the last field for +// this struct from the field stack. +func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { + // consume the last field we read off the wire. + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) + } + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +// Read a field header off the wire. +func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + t, err := p.readByteDirect() + if err != nil { + return + } + + // if it's a stop, then we can return immediately, as the struct is over. + if (t & 0x0f) == STOP { + return "", STOP, 0, nil + } + + // mask off the 4 MSB of the type header. it could contain a field id delta. 
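+ // A non-zero high nibble is a delta from the previously read field id; a zero
+ // high nibble means the full id follows as a zigzag varint (mirroring
+ // writeFieldBeginInternal on the write side).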
+ modifier := int16((t & 0xf0) >> 4) + if modifier == 0 { + // not a delta. look ahead for the zigzag varint field id. + id, err = p.ReadI16(ctx) + if err != nil { + return + } + } else { + // has a delta. add the delta to the last read field id. + id = int16(p.lastFieldId) + modifier + } + typeId, e := p.getTType(tCompactType(t & 0x0f)) + if e != nil { + err = NewTProtocolException(e) + return + } + + // if this happens to be a boolean field, the value is encoded in the type + if p.isBoolType(t) { + // save the boolean value in a special instance variable. + p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) + p.boolValueIsNotNull = true + } + + // push the new field onto the field stack so we can keep the deltas going. + p.lastFieldId = int(id) + return +} + +func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil } + +// Read a map header off the wire. If the size is zero, skip reading the key +// and value type. This means that 0-length maps will yield TMaps without the +// "correct" types. +func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + size32, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + keyAndValueType := byte(STOP) + if size != 0 { + keyAndValueType, err = p.readByteDirect() + if err != nil { + return + } + } + keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) + valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) + return +} + +func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil } + +// Read a list header off the wire. If the list size is 0-14, the size will +// be packed into the element type header. If it's a longer list, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + size_and_type, err := p.readByteDirect() + if err != nil { + return + } + size = int((size_and_type >> 4) & 0x0f) + if size == 15 { + size2, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size2 < 0 { + err = invalidDataLength + return + } + size = int(size2) + } + elemType, e := p.getTType(tCompactType(size_and_type)) + if e != nil { + err = NewTProtocolException(e) + return + } + return +} + +func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil } + +// Read a set header off the wire. If the set size is 0-14, the size will +// be packed into the element type header. If it's a longer set, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.ReadListBegin(ctx) +} + +func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil } + +// Read a boolean off the wire. If this is a boolean field, the value should +// already have been read during readFieldBegin, so we'll just consume the +// pre-stored value. Otherwise, read a byte. +func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) { + if p.boolValueIsNotNull { + p.boolValueIsNotNull = false + return p.boolValue, nil + } + v, err := p.readByteDirect() + return v == COMPACT_BOOLEAN_TRUE, err +} + +// Read a single byte off the wire. Nothing interesting here. 
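+// Any transport error is wrapped in a TProtocolException.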
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.readByteDirect() + if err != nil { + return 0, NewTProtocolException(err) + } + return int8(v), err +} + +// Read an i16 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { + v, err := p.ReadI32(ctx) + return int16(v), err +} + +// Read an i32 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { + v, e := p.readVarint32() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt32(v) + return value, nil +} + +// Read an i64 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { + v, e := p.readVarint64() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt64(v) + return value, nil +} + +// No magic here - just read a double off the wire. +func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + longBits := p.buffer[0:8] + _, e := io.ReadFull(p.trans, longBits) + if e != nil { + return 0.0, NewTProtocolException(e) + } + return math.Float64frombits(p.bytesToUint64(longBits)), nil +} + +// Reads a []byte (via readBinary), and then UTF-8 decodes it. +func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { + length, e := p.readVarint32() + if e != nil { + return "", NewTProtocolException(e) + } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } + if length == 0 { + return "", nil + } + if length < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:length] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + buf, e := safeReadBytes(length, p.trans) + return string(buf), NewTProtocolException(e) +} + +// Read a []byte from the wire. +func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + length, e := p.readVarint32() + if e != nil { + return nil, NewTProtocolException(e) + } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } + if length == 0 { + return []byte{}, nil + } + + buf, e := safeReadBytes(length, p.trans) + return buf, NewTProtocolException(e) +} + +func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TCompactProtocol) Transport() TTransport { + return p.origTransport +} + +// +// Internal writing methods +// + +// Abstract method for writing the start of lists and sets. List and sets on +// the wire differ only by the type indicator. +func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { + if size <= 14 { + return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) + } + err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) + if err != nil { + return 0, err + } + m, err := p.writeVarint32(int32(size)) + return 1 + m, err +} + +// Write an i32 as a varint. Results in 1-5 bytes on the wire. +// TODO(pomack): make a permanent buffer like writeVarint64? 
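+// Each output byte carries 7 bits of payload with the MSB acting as a
+// continuation flag, so e.g. 300 is written as 0xAC 0x02. Callers such as
+// WriteI32 zigzag-encode the value first (0->0, -1->1, 1->2, -2->3, ...) so
+// that small magnitudes stay small on the wire.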
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { + i32buf := p.buffer[0:5] + idx := 0 + for { + if (n & ^0x7F) == 0 { + i32buf[idx] = byte(n) + idx++ + // p.writeByteDirect(byte(n)); + break + // return; + } else { + i32buf[idx] = byte((n & 0x7F) | 0x80) + idx++ + // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); + u := uint32(n) + n = int32(u >> 7) + } + } + return p.trans.Write(i32buf[0:idx]) +} + +// Write an i64 as a varint. Results in 1-10 bytes on the wire. +func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { + varint64out := p.buffer[0:10] + idx := 0 + for { + if (n & ^0x7F) == 0 { + varint64out[idx] = byte(n) + idx++ + break + } else { + varint64out[idx] = byte((n & 0x7F) | 0x80) + idx++ + u := uint64(n) + n = int64(u >> 7) + } + } + return p.trans.Write(varint64out[0:idx]) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { + return (l << 1) ^ (l >> 63) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { + return (n << 1) ^ (n >> 31) +} + +func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { + binary.LittleEndian.PutUint64(buf, n) +} + +func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { + binary.LittleEndian.PutUint64(buf, uint64(n)) +} + +// Writes a byte without any possibility of all that field header nonsense. +// Used internally by other writing methods that know they need to write a byte. +func (p *TCompactProtocol) writeByteDirect(b byte) error { + return p.trans.WriteByte(b) +} + +// Writes a byte without any possibility of all that field header nonsense. +func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { + return 1, p.writeByteDirect(byte(n)) +} + +// +// Internal reading methods +// + +// Read an i32 from the wire as a varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 5 bytes. +func (p *TCompactProtocol) readVarint32() (int32, error) { + // if the wire contains the right stuff, this will just truncate the i64 we + // read and get us the right sign. + v, err := p.readVarint64() + return int32(v), err +} + +// Read an i64 from the wire as a proper varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 10 bytes. +func (p *TCompactProtocol) readVarint64() (int64, error) { + shift := uint(0) + result := int64(0) + for { + b, err := p.readByteDirect() + if err != nil { + return 0, err + } + result |= int64(b&0x7f) << shift + if (b & 0x80) != 0x80 { + break + } + shift += 7 + } + return result, nil +} + +// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. +func (p *TCompactProtocol) readByteDirect() (byte, error) { + return p.trans.ReadByte() +} + +// +// encoding helpers +// + +// Convert from zigzag int to int. +func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { + u := uint32(n) + return int32(u>>1) ^ -(n & 1) +} + +// Convert from zigzag long to long. +func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { + u := uint64(n) + return int64(u>>1) ^ -(n & 1) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. 
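+// (That caveat is inherited from an earlier implementation; in this Go port the
+// conversion is simply delegated to encoding/binary.)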
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { + return int64(binary.LittleEndian.Uint64(b)) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { + return binary.LittleEndian.Uint64(b) +} + +// +// type testing and converting +// + +func (p *TCompactProtocol) isBoolType(b byte) bool { + return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE +} + +// Given a tCompactType constant, convert it to its corresponding +// TType value. +func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { + switch byte(t) & 0x0f { + case STOP: + return STOP, nil + case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: + return BOOL, nil + case COMPACT_BYTE: + return BYTE, nil + case COMPACT_I16: + return I16, nil + case COMPACT_I32: + return I32, nil + case COMPACT_I64: + return I64, nil + case COMPACT_DOUBLE: + return DOUBLE, nil + case COMPACT_BINARY: + return STRING, nil + case COMPACT_LIST: + return LIST, nil + case COMPACT_SET: + return SET, nil + case COMPACT_MAP: + return MAP, nil + case COMPACT_STRUCT: + return STRUCT, nil + } + return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) +} + +// Given a TType value, find the appropriate TCompactProtocol.Types constant. +func (p *TCompactProtocol) getCompactType(t TType) tCompactType { + return ttypeToCompactType[t] +} + +func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) + _ TConfigurationSetter = (*TCompactProtocol)(nil) +) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go new file mode 100644 index 00000000000..454d9f37748 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "fmt" + "time" +) + +// Default TConfiguration values. +const ( + DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 + DEFAULT_MAX_FRAME_SIZE = 16384000 + + DEFAULT_TBINARY_STRICT_READ = false + DEFAULT_TBINARY_STRICT_WRITE = true + + DEFAULT_CONNECT_TIMEOUT = 0 + DEFAULT_SOCKET_TIMEOUT = 0 +) + +// TConfiguration defines some configurations shared between TTransport, +// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. 
+// +// When constructing TConfiguration, you only need to specify the non-default +// fields. All zero values have sane default values. +// +// Not all configurations defined are applicable to all implementations. +// Implementations are free to ignore the configurations not applicable to them. +// +// All functions attached to this type are nil-safe. +// +// See [1] for spec. +// +// NOTE: When using TConfiguration, fill in all the configurations you want to +// set across the stack, not only the ones you want to set in the immediate +// TTransport/TProtocol. +// +// For example, say you want to migrate this old code into using TConfiguration: +// +// sccket := thrift.NewTSocketTimeout("host:port", time.Second) +// transFactory := thrift.NewTFramedTransportFactoryMaxLength( +// thrift.NewTTransportFactory(), +// 1024 * 1024 * 256, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) +// +// This is the wrong way to do it because in the end the TConfiguration used by +// socket and transFactory will be overwritten by the one used by protoFactory +// because of TConfiguration propagation: +// +// // bad example, DO NOT USE +// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// }) +// transFactory := thrift.NewTFramedTransportFactoryConf( +// thrift.NewTTransportFactory(), +// &thrift.TConfiguration{ +// MaxFrameSize: 1024 * 1024 * 256, +// }, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// }) +// +// This is the correct way to do it: +// +// conf := &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// +// MaxFrameSize: 1024 * 1024 * 256, +// +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// } +// sccket := thrift.NewTSocketConf("host:port", conf) +// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) +// +// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md +type TConfiguration struct { + // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. + MaxMessageSize int32 + + // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. + // + // Also if MaxMessageSize < MaxFrameSize, + // MaxMessageSize will be used instead. + MaxFrameSize int32 + + // Connect and socket timeouts to be used by TSocket and TSSLSocket. + // + // 0 means no timeout. + // + // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be + // used. + ConnectTimeout time.Duration + SocketTimeout time.Duration + + // TLS config to be used by TSSLSocket. + TLSConfig *tls.Config + + // Strict read/write configurations for TBinaryProtocol. + // + // BoolPtr helper function is available to use literal values. + TBinaryStrictRead *bool + TBinaryStrictWrite *bool + + // The wrapped protocol id to be used in THeader transport/protocol. + // + // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions + // are provided to help filling this value. + THeaderProtocolID *THeaderProtocolID + + // Used internally by deprecated constructors, to avoid overriding + // underlying TTransport/TProtocol's cfg by accidental propagations. + // + // For external users this is always false. 
+ noPropagation bool +} + +// GetMaxMessageSize returns the max message size an implementation should +// follow. +// +// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil. +func (tc *TConfiguration) GetMaxMessageSize() int32 { + if tc == nil || tc.MaxMessageSize <= 0 { + return DEFAULT_MAX_MESSAGE_SIZE + } + return tc.MaxMessageSize +} + +// GetMaxFrameSize returns the max frame size an implementation should follow. +// +// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil. +// +// If the configured max message size is smaller than the configured max frame +// size, the smaller one will be returned instead. +func (tc *TConfiguration) GetMaxFrameSize() int32 { + if tc == nil { + return DEFAULT_MAX_FRAME_SIZE + } + maxFrameSize := tc.MaxFrameSize + if maxFrameSize <= 0 { + maxFrameSize = DEFAULT_MAX_FRAME_SIZE + } + if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize { + return maxMessageSize + } + return maxFrameSize +} + +// GetConnectTimeout returns the connect timeout should be used by TSocket and +// TSSLSocket. +// +// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead. +func (tc *TConfiguration) GetConnectTimeout() time.Duration { + if tc == nil || tc.ConnectTimeout < 0 { + return DEFAULT_CONNECT_TIMEOUT + } + return tc.ConnectTimeout +} + +// GetSocketTimeout returns the socket timeout should be used by TSocket and +// TSSLSocket. +// +// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead. +func (tc *TConfiguration) GetSocketTimeout() time.Duration { + if tc == nil || tc.SocketTimeout < 0 { + return DEFAULT_SOCKET_TIMEOUT + } + return tc.SocketTimeout +} + +// GetTLSConfig returns the tls config should be used by TSSLSocket. +// +// It's nil-safe. If tc is nil, nil will be returned instead. +func (tc *TConfiguration) GetTLSConfig() *tls.Config { + if tc == nil { + return nil + } + return tc.TLSConfig +} + +// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol +// should follow. +// +// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or +// tc.TBinaryStrictRead is nil. +func (tc *TConfiguration) GetTBinaryStrictRead() bool { + if tc == nil || tc.TBinaryStrictRead == nil { + return DEFAULT_TBINARY_STRICT_READ + } + return *tc.TBinaryStrictRead +} + +// GetTBinaryStrictWrite returns the strict read configuration TBinaryProtocol +// should follow. +// +// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or +// tc.TBinaryStrictWrite is nil. +func (tc *TConfiguration) GetTBinaryStrictWrite() bool { + if tc == nil || tc.TBinaryStrictWrite == nil { + return DEFAULT_TBINARY_STRICT_WRITE + } + return *tc.TBinaryStrictWrite +} + +// GetTHeaderProtocolID returns the THeaderProtocolID should be used by +// THeaderProtocol clients (for servers, they always use the same one as the +// client instead). +// +// It's nil-safe. If either tc or tc.THeaderProtocolID is nil, +// THeaderProtocolDefault will be returned instead. +// THeaderProtocolDefault will also be returned if configured value is invalid. +func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { + if tc == nil || tc.THeaderProtocolID == nil { + return THeaderProtocolDefault + } + protoID := *tc.THeaderProtocolID + if err := protoID.Validate(); err != nil { + return THeaderProtocolDefault + } + return protoID +} + +// THeaderProtocolIDPtr validates and returns the pointer to id. 
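+// The returned pointer can be assigned directly to TConfiguration.THeaderProtocolID.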
+// +// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault +// and the validation error will be returned. +func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) { + err := id.Validate() + if err != nil { + id = THeaderProtocolDefault + } + return &id, err +} + +// THeaderProtocolIDPtrMust validates and returns the pointer to id. +// +// It's similar to THeaderProtocolIDPtr, but it panics on validation errors +// instead of returning them. +func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID { + ptr, err := THeaderProtocolIDPtr(id) + if err != nil { + panic(err) + } + return ptr +} + +// TConfigurationSetter is an optional interface TProtocol, TTransport, +// TProtocolFactory, TTransportFactory, and other implementations can implement. +// +// It's intended to be called during intializations. +// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the +// middle of a message is undefined: +// It may or may not change the behavior of the current processing message, +// and it may even cause the current message to fail. +// +// Note for implementations: SetTConfiguration might be called multiple times +// with the same value in quick successions due to the implementation of the +// propagation. Implementations should make SetTConfiguration as simple as +// possible (usually just overwrite the stored configuration and propagate it to +// the wrapped TTransports/TProtocols). +type TConfigurationSetter interface { + SetTConfiguration(*TConfiguration) +} + +// PropagateTConfiguration propagates cfg to impl if impl implements +// TConfigurationSetter and cfg is non-nil, otherwise it does nothing. +// +// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration +// with everything being default value, use &TConfiguration{} explicitly instead. +func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { + if cfg == nil || cfg.noPropagation { + return + } + + if setter, ok := impl.(TConfigurationSetter); ok { + setter.SetTConfiguration(cfg) + } +} + +func checkSizeForProtocol(size int32, cfg *TConfiguration) error { + if size < 0 { + return NewTProtocolExceptionWithType( + NEGATIVE_SIZE, + fmt.Errorf("negative size: %d", size), + ) + } + if size > cfg.GetMaxMessageSize() { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + fmt.Errorf("size exceeded max allowed: %d", size), + ) + } + return nil +} + +type tTransportFactoryConf struct { + delegate TTransportFactory + cfg *TConfiguration +} + +func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { + trans, err := f.delegate.GetTransport(orig) + if err == nil { + PropagateTConfiguration(orig, f.cfg) + PropagateTConfiguration(trans, f.cfg) + } + return trans, err +} + +func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TTransportFactoryConf wraps a TTransportFactory to propagate +// TConfiguration on the factory's GetTransport calls. 
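+//
+// A minimal usage sketch (the configuration value and the base transport are
+// illustrative):
+//
+//    factory := TTransportFactoryConf(NewTTransportFactory(), &TConfiguration{
+//        ConnectTimeout: time.Second,
+//    })
+//    trans, err := factory.GetTransport(base)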
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { + return &tTransportFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +type tProtocolFactoryConf struct { + delegate TProtocolFactory + cfg *TConfiguration +} + +func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { + proto := f.delegate.GetProtocol(trans) + PropagateTConfiguration(trans, f.cfg) + PropagateTConfiguration(proto, f.cfg) + return proto +} + +func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TProtocolFactoryConf wraps a TProtocolFactory to propagate +// TConfiguration on the factory's GetProtocol calls. +func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { + return &tProtocolFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +var ( + _ TConfigurationSetter = (*tTransportFactoryConf)(nil) + _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) +) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go new file mode 100644 index 00000000000..d15c1bcf894 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +var defaultCtx = context.Background() diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go new file mode 100644 index 00000000000..fdf9bfec15e --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go @@ -0,0 +1,447 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" +) + +type TDebugProtocol struct { + // Required. The actual TProtocol to do the read/write. 
+ Delegate TProtocol + + // Optional. The logger and prefix to log all the args/return values + // from Delegate TProtocol calls. + // + // If Logger is nil, StdLogger using stdlib log package with os.Stderr + // will be used. If disable logging is desired, set Logger to NopLogger + // explicitly instead of leaving it as nil/unset. + Logger Logger + LogPrefix string + + // Optional. An TProtocol to duplicate everything read/written from Delegate. + // + // A typical use case of this is to use TSimpleJSONProtocol wrapping + // TMemoryBuffer in a middleware to json logging requests/responses. + // + // This feature is not available from TDebugProtocolFactory. In order to + // use it you have to construct TDebugProtocol directly, or set DuplicateTo + // field after getting a TDebugProtocol from the factory. + DuplicateTo TProtocol +} + +type TDebugProtocolFactory struct { + Underlying TProtocolFactory + LogPrefix string + Logger Logger +} + +// NewTDebugProtocolFactory creates a TDebugProtocolFactory. +// +// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct +// itself instead. This version will use the default logger from standard +// library. +func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: StdLogger(nil), + } +} + +// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory. +func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: logger, + } +} + +func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return &TDebugProtocol{ + Delegate: t.Underlying.GetProtocol(trans), + LogPrefix: t.LogPrefix, + Logger: fallbackLogger(t.Logger), + } +} + +func (tdp *TDebugProtocol) logf(format string, v ...interface{}) { + fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...)) +} + +func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) + tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return err +} +func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMessageEnd(ctx) + tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error { + err := tdp.Delegate.WriteStructBegin(ctx, name) + tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return err +} +func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { + err := tdp.Delegate.WriteStructEnd(ctx) + tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) + tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", 
tdp.LogPrefix, name, typeId, id, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { + err := tdp.Delegate.WriteFieldEnd(ctx) + tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { + err := tdp.Delegate.WriteFieldStop(ctx) + tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldStop(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) + tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMapEnd(ctx) + tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteListBegin(ctx, elemType, size) + tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { + err := tdp.Delegate.WriteListEnd(ctx) + tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) + tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { + err := tdp.Delegate.WriteSetEnd(ctx) + tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { + err := tdp.Delegate.WriteBool(ctx, value) + tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { + err := tdp.Delegate.WriteByte(ctx, value) + tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { + err := tdp.Delegate.WriteI16(ctx, value) + tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { + err := tdp.Delegate.WriteI32(ctx, value) + tdp.logf("%sWriteI32(value=%#v) => 
%#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { + err := tdp.Delegate.WriteI64(ctx, value) + tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { + err := tdp.Delegate.WriteDouble(ctx, value) + tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { + err := tdp.Delegate.WriteString(ctx, value) + tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { + err := tdp.Delegate.WriteBinary(ctx, value) + tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return err +} + +func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) + tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return +} +func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMessageEnd(ctx) + tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + name, err = tdp.Delegate.ReadStructBegin(ctx) + tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return +} +func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadStructEnd(ctx) + tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) + tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return +} +func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadFieldEnd(ctx) + tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) + tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) + if tdp.DuplicateTo != nil { + 
tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return +} +func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMapEnd(ctx) + tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadListBegin(ctx) + tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadListEnd(ctx) + tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) + tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadSetEnd(ctx) + tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { + value, err = tdp.Delegate.ReadBool(ctx) + tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { + value, err = tdp.Delegate.ReadByte(ctx) + tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) { + value, err = tdp.Delegate.ReadI16(ctx) + tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { + value, err = tdp.Delegate.ReadI32(ctx) + tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { + value, err = tdp.Delegate.ReadI64(ctx) + tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + value, err = tdp.Delegate.ReadDouble(ctx) + tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) { + value, err = tdp.Delegate.ReadString(ctx) + tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } 
+ return +} +func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + value, err = tdp.Delegate.ReadBinary(ctx) + tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return +} +func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + err = tdp.Delegate.Skip(ctx, fieldType) + tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.Skip(ctx, fieldType) + } + return +} +func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { + err = tdp.Delegate.Flush(ctx) + tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.Flush(ctx) + } + return +} + +func (tdp *TDebugProtocol) Transport() TTransport { + return tdp.Delegate.Transport() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(tdp.Delegate, conf) + PropagateTConfiguration(tdp.DuplicateTo, conf) +} + +var _ TConfigurationSetter = (*TDebugProtocol)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go new file mode 100644 index 00000000000..cefc7ecda5d --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "sync" +) + +type TDeserializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +func NewTDeserializer() *TDeserializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TDeserializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) { + t.Transport.Reset() + + err = nil + if _, err = t.Transport.Write([]byte(s)); err != nil { + return + } + if err = msg.Read(ctx, t.Protocol); err != nil { + return + } + return +} + +func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) { + t.Transport.Reset() + + err = nil + if _, err = t.Transport.Write(b); err != nil { + return + } + if err = msg.Read(ctx, t.Protocol); err != nil { + return + } + return +} + +// TDeserializerPool is the thread-safe version of TDeserializer, +// it uses resource pool of TDeserializer under the hood. +// +// It must be initialized with either NewTDeserializerPool or +// NewTDeserializerPoolSizeFactory. 
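+//
+// A minimal usage sketch (msg and payload stand in for a generated TStruct and
+// its serialized bytes):
+//
+//    pool := NewTDeserializerPool(NewTDeserializer)
+//    err := pool.Read(context.Background(), msg, payload)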
+type TDeserializerPool struct { + pool sync.Pool +} + +// NewTDeserializerPool creates a new TDeserializerPool. +// +// NewTDeserializer can be used as the arg here. +func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool { + return &TDeserializerPool{ + pool: sync.Pool{ + New: func() interface{} { + return f() + }, + }, + } +} + +// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with +// the given size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size. +func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool { + return &TDeserializerPool{ + pool: sync.Pool{ + New: func() interface{} { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TDeserializer{ + Transport: transport, + Protocol: protocol, + } + }, + }, + } +} + +func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error { + d := t.pool.Get().(*TDeserializer) + defer t.pool.Put(d) + return d.ReadString(ctx, msg, s) +} + +func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error { + d := t.pool.Get().(*TDeserializer) + defer t.pool.Put(d) + return d.Read(ctx, msg, b) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go new file mode 100644 index 00000000000..53bf862ea5b --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" +) + +// Generic Thrift exception +type TException interface { + error + + TExceptionType() TExceptionType +} + +// Prepends additional information to an error without losing the Thrift exception interface +func PrependError(prepend string, err error) error { + msg := prepend + err.Error() + + var te TException + if errors.As(err, &te) { + switch te.TExceptionType() { + case TExceptionTypeTransport: + if t, ok := err.(TTransportException); ok { + return prependTTransportException(prepend, t) + } + case TExceptionTypeProtocol: + if t, ok := err.(TProtocolException); ok { + return prependTProtocolException(prepend, t) + } + case TExceptionTypeApplication: + var t TApplicationException + if errors.As(err, &t) { + return NewTApplicationException(t.TypeId(), msg) + } + } + + return wrappedTException{ + err: err, + msg: msg, + tExceptionType: te.TExceptionType(), + } + } + + return errors.New(msg) +} + +// TExceptionType is an enum type to categorize different "subclasses" of TExceptions. 
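+// PrependError and WrapTException use it to preserve the category of a wrapped error.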
+type TExceptionType byte + +// TExceptionType values +const ( + TExceptionTypeUnknown TExceptionType = iota + TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler + TExceptionTypeApplication // TApplicationExceptions + TExceptionTypeProtocol // TProtocolExceptions + TExceptionTypeTransport // TTransportExceptions +) + +// WrapTException wraps an error into TException. +// +// If err is nil or already TException, it's returned as-is. +// Otherwise it will be wraped into TException with TExceptionType() returning +// TExceptionTypeUnknown, and Unwrap() returning the original error. +func WrapTException(err error) TException { + if err == nil { + return nil + } + + if te, ok := err.(TException); ok { + return te + } + + return wrappedTException{ + err: err, + msg: err.Error(), + tExceptionType: TExceptionTypeUnknown, + } +} + +type wrappedTException struct { + err error + msg string + tExceptionType TExceptionType +} + +func (w wrappedTException) Error() string { + return w.msg +} + +func (w wrappedTException) TExceptionType() TExceptionType { + return w.tExceptionType +} + +func (w wrappedTException) Unwrap() error { + return w.err +} + +var _ TException = wrappedTException{} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go new file mode 100644 index 00000000000..f683e7f544b --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" +) + +// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. +const DEFAULT_MAX_LENGTH = 16384000 + +type TFramedTransport struct { + transport TTransport + + cfg *TConfiguration + + writeBuf bytes.Buffer + + reader *bufio.Reader + readBuf bytes.Buffer + + buffer [4]byte +} + +type tFramedTransportFactory struct { + factory TTransportFactory + cfg *TConfiguration +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. +func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. 
+func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + PropagateTConfiguration(factory, conf) + return &tFramedTransportFactory{ + factory: factory, + cfg: conf, + } +} + +func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { + PropagateTConfiguration(base, p.cfg) + tt, err := p.factory.GetTransport(base) + if err != nil { + return nil, err + } + return NewTFramedTransportConf(tt, p.cfg), nil +} + +func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.factory, cfg) + p.cfg = cfg +} + +// Deprecated: Use NewTFramedTransportConf instead. +func NewTFramedTransport(transport TTransport) *TFramedTransport { + return NewTFramedTransportConf(transport, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportConf instead. +func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { + return NewTFramedTransportConf(transport, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport { + PropagateTConfiguration(transport, conf) + return &TFramedTransport{ + transport: transport, + reader: bufio.NewReader(transport), + cfg: conf, + } +} + +func (p *TFramedTransport) Open() error { + return p.transport.Open() +} + +func (p *TFramedTransport) IsOpen() bool { + return p.transport.IsOpen() +} + +func (p *TFramedTransport) Close() error { + return p.transport.Close() +} + +func (p *TFramedTransport) Read(buf []byte) (read int, err error) { + read, err = p.readBuf.Read(buf) + if err != io.EOF { + return + } + + // For bytes.Buffer.Read, EOF would only happen when read is zero, + // but still, do a sanity check, + // in case that behavior is changed in a future version of go stdlib. + // When that happens, just return nil error, + // and let the caller call Read again to read the next frame. + if read > 0 { + return read, nil + } + + // Reaching here means that the last Read finished the last frame, + // so we need to read the next frame into readBuf now. 
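+ // After readFrame buffers the next frame, recurse so the caller is served from
+ // readBuf.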
+ if err = p.readFrame(); err != nil { + return read, err + } + newRead, err := p.Read(buf[read:]) + return read + newRead, err +} + +func (p *TFramedTransport) ReadByte() (c byte, err error) { + buf := p.buffer[:1] + _, err = p.Read(buf) + if err != nil { + return + } + c = buf[0] + return +} + +func (p *TFramedTransport) Write(buf []byte) (int, error) { + n, err := p.writeBuf.Write(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) WriteByte(c byte) error { + return p.writeBuf.WriteByte(c) +} + +func (p *TFramedTransport) WriteString(s string) (n int, err error) { + return p.writeBuf.WriteString(s) +} + +func (p *TFramedTransport) Flush(ctx context.Context) error { + size := p.writeBuf.Len() + buf := p.buffer[:4] + binary.BigEndian.PutUint32(buf, uint32(size)) + _, err := p.transport.Write(buf) + if err != nil { + p.writeBuf.Reset() + return NewTTransportExceptionFromError(err) + } + if size > 0 { + if _, err := io.Copy(p.transport, &p.writeBuf); err != nil { + p.writeBuf.Reset() + return NewTTransportExceptionFromError(err) + } + } + err = p.transport.Flush(ctx) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) readFrame() error { + buf := p.buffer[:4] + if _, err := io.ReadFull(p.reader, buf); err != nil { + return err + } + size := binary.BigEndian.Uint32(buf) + if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) { + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) + } + _, err := io.CopyN(&p.readBuf, p.reader, int64(size)) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { + return uint64(p.readBuf.Len()) +} + +// SetTConfiguration implements TConfigurationSetter. +func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tFramedTransportFactory)(nil) + _ TConfigurationSetter = (*TFramedTransport)(nil) +) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go new file mode 100644 index 00000000000..ac9bd4882be --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. +type ( + headerKey string + headerKeyList int +) + +// Values for headerKeyList. +const ( + headerKeyListRead headerKeyList = iota + headerKeyListWrite +) + +// SetHeader sets a header in the context. 
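Before the header-context helpers: the framed transport defined above buffers writes and, on Flush, prefixes them with a 4-byte big-endian length, while readFrame enforces the configured MaxFrameSize on the receive side. A minimal client-side sketch of the non-deprecated *Conf constructor, assuming the same upstream import path and an already-opened raw transport such as a *TSocket:

    package framedexample

    import (
        "context"

        "github.com/apache/thrift/lib/go/thrift"
    )

    // sendFramed writes one payload as a single frame over sock, which is
    // assumed to be an already-opened raw transport such as a *TSocket.
    func sendFramed(sock thrift.TTransport, payload []byte) error {
        conf := &thrift.TConfiguration{MaxFrameSize: 1 << 20} // reject frames over 1 MiB
        framed := thrift.NewTFramedTransportConf(sock, conf)

        // Writes accumulate in the transport's buffer; Flush prefixes the
        // buffered bytes with their length and pushes the frame out.
        if _, err := framed.Write(payload); err != nil {
            return err
        }
        return framed.Flush(context.Background())
    }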
+func SetHeader(ctx context.Context, key, value string) context.Context { + return context.WithValue( + ctx, + headerKey(key), + value, + ) +} + +// UnsetHeader unsets a previously set header in the context. +func UnsetHeader(ctx context.Context, key string) context.Context { + return context.WithValue( + ctx, + headerKey(key), + nil, + ) +} + +// GetHeader returns a value of the given header from the context. +func GetHeader(ctx context.Context, key string) (value string, ok bool) { + if v := ctx.Value(headerKey(key)); v != nil { + value, ok = v.(string) + } + return +} + +// SetReadHeaderList sets the key list of read THeaders in the context. +func SetReadHeaderList(ctx context.Context, keys []string) context.Context { + return context.WithValue( + ctx, + headerKeyListRead, + keys, + ) +} + +// GetReadHeaderList returns the key list of read THeaders from the context. +func GetReadHeaderList(ctx context.Context) []string { + if v := ctx.Value(headerKeyListRead); v != nil { + if value, ok := v.([]string); ok { + return value + } + } + return nil +} + +// SetWriteHeaderList sets the key list of THeaders to write in the context. +func SetWriteHeaderList(ctx context.Context, keys []string) context.Context { + return context.WithValue( + ctx, + headerKeyListWrite, + keys, + ) +} + +// GetWriteHeaderList returns the key list of THeaders to write from the context. +func GetWriteHeaderList(ctx context.Context) []string { + if v := ctx.Value(headerKeyListWrite); v != nil { + if value, ok := v.([]string); ok { + return value + } + } + return nil +} + +// AddReadTHeaderToContext adds the whole THeader headers into context. +func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context { + keys := make([]string, 0, len(headers)) + for key, value := range headers { + ctx = SetHeader(ctx, key, value) + keys = append(keys, key) + } + return SetReadHeaderList(ctx, keys) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go new file mode 100644 index 00000000000..878041f8df1 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go @@ -0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" +) + +// THeaderProtocol is a thrift protocol that implements THeader: +// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md +// +// It supports either binary or compact protocol as the wrapped protocol. +// +// Most of the THeader handlings are happening inside THeaderTransport. +type THeaderProtocol struct { + transport *THeaderTransport + + // Will be initialized on first read/write. 
+ protocol TProtocol + + cfg *TConfiguration +} + +// Deprecated: Use NewTHeaderProtocolConf instead. +func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { + return newTHeaderProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying +// transport with given TConfiguration. +// +// The passed in transport will be wrapped with THeaderTransport. +// +// Note that THeaderTransport handles frame and zlib by itself, +// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), +// instead of rich transports like TZlibTransport or TFramedTransport. +func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol { + return newTHeaderProtocolConf(trans, conf) +} + +func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol { + t := NewTHeaderTransportConf(trans, cfg) + p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t) + PropagateTConfiguration(p, cfg) + return &THeaderProtocol{ + transport: t, + protocol: p, + cfg: cfg, + } +} + +type tHeaderProtocolFactory struct { + cfg *TConfiguration +} + +func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return newTHeaderProtocolConf(trans, f.cfg) +} + +func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) { + f.cfg = cfg +} + +// Deprecated: Use NewTHeaderProtocolFactoryConf instead. +func NewTHeaderProtocolFactory() TProtocolFactory { + return NewTHeaderProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderProtocolFactoryConf creates a factory for THeader with given +// TConfiguration. +func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory { + return tHeaderProtocolFactory{ + cfg: conf, + } +} + +// Transport returns the underlying transport. +// +// It's guaranteed to be of type *THeaderTransport. +func (p *THeaderProtocol) Transport() TTransport { + return p.transport +} + +// GetReadHeaders returns the THeaderMap read from transport. +func (p *THeaderProtocol) GetReadHeaders() THeaderMap { + return p.transport.GetReadHeaders() +} + +// SetWriteHeader sets a header for write. +func (p *THeaderProtocol) SetWriteHeader(key, value string) { + p.transport.SetWriteHeader(key, value) +} + +// ClearWriteHeaders clears all write headers previously set. +func (p *THeaderProtocol) ClearWriteHeaders() { + p.transport.ClearWriteHeaders() +} + +// AddTransform add a transform for writing. 
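As the constructor comment above notes, THeaderProtocol wants a raw socket-style transport underneath because the header transport handles framing and zlib itself. A minimal sketch of client-side wiring, again assuming the upstream import path and an already-opened raw transport; the header key and value are made up for illustration:

    package headerexample

    import (
        "github.com/apache/thrift/lib/go/thrift"
    )

    // newHeaderClientProtocol wraps an opened raw transport (e.g. a *TSocket)
    // in THeader and attaches a per-message header before any RPC is issued.
    func newHeaderClientProtocol(sock thrift.TTransport) *thrift.THeaderProtocol {
        conf := &thrift.TConfiguration{MaxFrameSize: 1 << 20}
        proto := thrift.NewTHeaderProtocolConf(sock, conf)

        // Headers set here are written with the next message; headers sent by
        // the peer can be read back later via GetReadHeaders.
        proto.SetWriteHeader("client-name", "example-client")
        return proto
    }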
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { + return p.transport.AddTransform(transform) +} + +func (p *THeaderProtocol) Flush(ctx context.Context) error { + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { + newProto, err := p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + return err + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + p.transport.SequenceID = seqID + return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) +} + +func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { + if err := p.protocol.WriteMessageEnd(ctx); err != nil { + return err + } + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { + return p.protocol.WriteStructBegin(ctx, name) +} + +func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { + return p.protocol.WriteStructEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { + return p.protocol.WriteFieldBegin(ctx, name, typeID, id) +} + +func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { + return p.protocol.WriteFieldEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { + return p.protocol.WriteFieldStop(ctx) +} + +func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) +} + +func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { + return p.protocol.WriteMapEnd(ctx) +} + +func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteListBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { + return p.protocol.WriteListEnd(ctx) +} + +func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteSetBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { + return p.protocol.WriteSetEnd(ctx) +} + +func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { + return p.protocol.WriteBool(ctx, value) +} + +func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error { + return p.protocol.WriteByte(ctx, value) +} + +func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { + return p.protocol.WriteI16(ctx, value) +} + +func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { + return p.protocol.WriteI32(ctx, value) +} + +func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { + return p.protocol.WriteI64(ctx, value) +} + +func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.protocol.WriteDouble(ctx, value) +} + +func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { + return p.protocol.WriteString(ctx, value) +} + +func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { + return p.protocol.WriteBinary(ctx, value) +} + +// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
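The Write* methods above delegate to the wrapped binary or compact protocol, and WriteMessageEnd also flushes the THeader frame. A small sketch of a hand-rolled message write using those calls (normally generated code does this); the method name and empty argument struct are illustrative only:

    package headerexample

    import (
        "context"

        "github.com/apache/thrift/lib/go/thrift"
    )

    // sendPing writes a "ping" call with an empty argument struct through a
    // THeaderProtocol; WriteMessageEnd flushes the enclosing THeader frame.
    func sendPing(proto *thrift.THeaderProtocol, seq int32) error {
        ctx := context.Background()
        if err := proto.WriteMessageBegin(ctx, "ping", thrift.CALL, seq); err != nil {
            return err
        }
        if err := proto.WriteStructBegin(ctx, "ping_args"); err != nil {
            return err
        }
        if err := proto.WriteFieldStop(ctx); err != nil {
            return err
        }
        if err := proto.WriteStructEnd(ctx); err != nil {
            return err
        }
        return proto.WriteMessageEnd(ctx)
    }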
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { + return p.transport.ReadFrame(ctx) +} + +func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { + if err = p.transport.ReadFrame(ctx); err != nil { + return + } + + var newProto TProtocol + newProto, err = p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + var tAppExc TApplicationException + if !errors.As(err, &tAppExc) { + return + } + if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { + return + } + if e := tAppExc.Write(ctx, p.protocol); e != nil { + return + } + if e := p.protocol.WriteMessageEnd(ctx); e != nil { + return + } + if e := p.transport.Flush(ctx); e != nil { + return + } + return + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + + return p.protocol.ReadMessageBegin(ctx) +} + +func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { + return p.protocol.ReadMessageEnd(ctx) +} + +func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return p.protocol.ReadStructBegin(ctx) +} + +func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { + return p.protocol.ReadStructEnd(ctx) +} + +func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { + return p.protocol.ReadFieldBegin(ctx) +} + +func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { + return p.protocol.ReadFieldEnd(ctx) +} + +func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + return p.protocol.ReadMapBegin(ctx) +} + +func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { + return p.protocol.ReadMapEnd(ctx) +} + +func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadListBegin(ctx) +} + +func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { + return p.protocol.ReadListEnd(ctx) +} + +func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadSetBegin(ctx) +} + +func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { + return p.protocol.ReadSetEnd(ctx) +} + +func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { + return p.protocol.ReadBool(ctx) +} + +func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { + return p.protocol.ReadByte(ctx) +} + +func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { + return p.protocol.ReadI16(ctx) +} + +func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { + return p.protocol.ReadI32(ctx) +} + +func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { + return p.protocol.ReadI64(ctx) +} + +func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + return p.protocol.ReadDouble(ctx) +} + +func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { + return p.protocol.ReadString(ctx) +} + +func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + return p.protocol.ReadBinary(ctx) +} + +func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { + return p.protocol.Skip(ctx, fieldType) +} + +// SetTConfiguration implements TConfigurationSetter. 
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + PropagateTConfiguration(p.protocol, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) + _ TConfigurationSetter = (*THeaderProtocol)(nil) +) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go new file mode 100644 index 00000000000..f5736df4276 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go @@ -0,0 +1,810 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "compress/zlib" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" +) + +// Size in bytes for 32-bit ints. +const size32 = 4 + +type headerMeta struct { + MagicFlags uint32 + SequenceID int32 + HeaderLength uint16 +} + +const headerMetaSize = 10 + +type clientType int + +const ( + clientUnknown clientType = iota + clientHeaders + clientFramedBinary + clientUnframedBinary + clientFramedCompact + clientUnframedCompact +) + +// Constants defined in THeader format: +// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md +const ( + THeaderHeaderMagic uint32 = 0x0fff0000 + THeaderHeaderMask uint32 = 0xffff0000 + THeaderFlagsMask uint32 = 0x0000ffff + THeaderMaxFrameSize uint32 = 0x3fffffff +) + +// THeaderMap is the type of the header map in THeader transport. +type THeaderMap map[string]string + +// THeaderProtocolID is the wrapped protocol id used in THeader. +type THeaderProtocolID int32 + +// Supported THeaderProtocolID values. +const ( + THeaderProtocolBinary THeaderProtocolID = 0x00 + THeaderProtocolCompact THeaderProtocolID = 0x02 + THeaderProtocolDefault = THeaderProtocolBinary +) + +// Declared globally to avoid repetitive allocations, not really used. +var globalMemoryBuffer = NewTMemoryBuffer() + +// Validate checks whether the THeaderProtocolID is a valid/supported one. +func (id THeaderProtocolID) Validate() error { + _, err := id.GetProtocol(globalMemoryBuffer) + return err +} + +// GetProtocol gets the corresponding TProtocol from the wrapped protocol id. +func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { + switch id { + default: + return nil, NewTApplicationException( + INVALID_PROTOCOL, + fmt.Sprintf("THeader protocol id %d not supported", id), + ) + case THeaderProtocolBinary: + return NewTBinaryProtocolTransport(trans), nil + case THeaderProtocolCompact: + return NewTCompactProtocol(trans), nil + } +} + +// THeaderTransformID defines the numeric id of the transform used. 
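THeaderProtocolID selects which concrete protocol (binary or compact) is carried inside THeader frames, and Validate/GetProtocol are the supported ways to check and materialize it. A small sketch, assuming trans is an existing TTransport and the same upstream import path:

    package protoidexample

    import (
        "github.com/apache/thrift/lib/go/thrift"
    )

    // wrappedProtocol returns the concrete protocol identified by id over trans,
    // falling back to the default (binary) when id is not a supported value.
    func wrappedProtocol(id thrift.THeaderProtocolID, trans thrift.TTransport) (thrift.TProtocol, error) {
        if err := id.Validate(); err != nil {
            id = thrift.THeaderProtocolDefault
        }
        return id.GetProtocol(trans)
    }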
+type THeaderTransformID int32 + +// THeaderTransformID values. +// +// Values not defined here are not currently supported, namely HMAC and Snappy. +const ( + TransformNone THeaderTransformID = iota // 0, no special handling + TransformZlib // 1, zlib +) + +var supportedTransformIDs = map[THeaderTransformID]bool{ + TransformNone: true, + TransformZlib: true, +} + +// TransformReader is an io.ReadCloser that handles transforms reading. +type TransformReader struct { + io.Reader + + closers []io.Closer +} + +var _ io.ReadCloser = (*TransformReader)(nil) + +// NewTransformReaderWithCapacity initializes a TransformReader with expected +// closers capacity. +// +// If you don't know the closers capacity beforehand, just use +// +// &TransformReader{Reader: baseReader} +// +// instead would be sufficient. +func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { + return &TransformReader{ + Reader: baseReader, + closers: make([]io.Closer, 0, capacity), + } +} + +// Close calls the underlying closers in appropriate order, +// stops at and returns the first error encountered. +func (tr *TransformReader) Close() error { + // Call closers in reversed order + for i := len(tr.closers) - 1; i >= 0; i-- { + if err := tr.closers[i].Close(); err != nil { + return err + } + } + return nil +} + +// AddTransform adds a transform. +func (tr *TransformReader) AddTransform(id THeaderTransformID) error { + switch id { + default: + return NewTApplicationException( + INVALID_TRANSFORM, + fmt.Sprintf("THeaderTransformID %d not supported", id), + ) + case TransformNone: + // no-op + case TransformZlib: + readCloser, err := zlib.NewReader(tr.Reader) + if err != nil { + return err + } + tr.Reader = readCloser + tr.closers = append(tr.closers, readCloser) + } + return nil +} + +// TransformWriter is an io.WriteCloser that handles transforms writing. +type TransformWriter struct { + io.Writer + + closers []io.Closer +} + +var _ io.WriteCloser = (*TransformWriter)(nil) + +// NewTransformWriter creates a new TransformWriter with base writer and transforms. +func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) { + writer := &TransformWriter{ + Writer: baseWriter, + closers: make([]io.Closer, 0, len(transforms)), + } + for _, id := range transforms { + if err := writer.AddTransform(id); err != nil { + return nil, err + } + } + return writer, nil +} + +// Close calls the underlying closers in appropriate order, +// stops at and returns the first error encountered. +func (tw *TransformWriter) Close() error { + // Call closers in reversed order + for i := len(tw.closers) - 1; i >= 0; i-- { + if err := tw.closers[i].Close(); err != nil { + return err + } + } + return nil +} + +// AddTransform adds a transform. +func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { + switch id { + default: + return NewTApplicationException( + INVALID_TRANSFORM, + fmt.Sprintf("THeaderTransformID %d not supported", id), + ) + case TransformNone: + // no-op + case TransformZlib: + writeCloser := zlib.NewWriter(tw.Writer) + tw.Writer = writeCloser + tw.closers = append(tw.closers, writeCloser) + } + return nil +} + +// THeaderInfoType is the type id of the info headers. +type THeaderInfoType int32 + +// Supported THeaderInfoType values. +const ( + _ THeaderInfoType = iota // Skip 0 + InfoKeyValue // 1 + // Rest of the info types are not supported. +) + +// THeaderTransport is a Transport mode that implements THeader. 
+// +// Note that THeaderTransport handles frame and zlib by itself, +// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket), +// instead of rich transports like TZlibTransport or TFramedTransport. +type THeaderTransport struct { + SequenceID int32 + Flags uint32 + + transport TTransport + + // THeaderMap for read and write + readHeaders THeaderMap + writeHeaders THeaderMap + + // Reading related variables. + reader *bufio.Reader + // When frame is detected, we read the frame fully into frameBuffer. + frameBuffer bytes.Buffer + // When it's non-nil, Read should read from frameReader instead of + // reader, and EOF error indicates end of frame instead of end of all + // transport. + frameReader io.ReadCloser + + // Writing related variables + writeBuffer bytes.Buffer + writeTransforms []THeaderTransformID + + clientType clientType + protocolID THeaderProtocolID + cfg *TConfiguration + + // buffer is used in the following scenarios to avoid repetitive + // allocations, while 4 is big enough for all those scenarios: + // + // * header padding (max size 4) + // * write the frame size (size 4) + buffer [4]byte +} + +var _ TTransport = (*THeaderTransport)(nil) + +// Deprecated: Use NewTHeaderTransportConf instead. +func NewTHeaderTransport(trans TTransport) *THeaderTransport { + return NewTHeaderTransportConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportConf creates THeaderTransport from the +// underlying transport, with given TConfiguration attached. +// +// If trans is already a *THeaderTransport, it will be returned as is, +// but with TConfiguration overridden by the value passed in. +// +// The protocol ID in TConfiguration is only useful for client transports. +// For servers, +// the protocol ID will be overridden again to the one set by the client, +// to ensure that servers always speak the same dialect as the client. +func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { + if ht, ok := trans.(*THeaderTransport); ok { + ht.SetTConfiguration(conf) + return ht + } + PropagateTConfiguration(trans, conf) + return &THeaderTransport{ + transport: trans, + reader: bufio.NewReader(trans), + writeHeaders: make(THeaderMap), + protocolID: conf.GetTHeaderProtocolID(), + cfg: conf, + } +} + +// Open calls the underlying transport's Open function. +func (t *THeaderTransport) Open() error { + return t.transport.Open() +} + +// IsOpen calls the underlying transport's IsOpen function. +func (t *THeaderTransport) IsOpen() bool { + return t.transport.IsOpen() +} + +// ReadFrame tries to read the frame header, guess the client type, and handle +// unframed clients. +func (t *THeaderTransport) ReadFrame(ctx context.Context) error { + if !t.needReadFrame() { + // No need to read frame, skipping. + return nil + } + + // Peek and handle the first 32 bits. + // They could either be the length field of a framed message, + // or the first bytes of an unframed message. + var buf []byte + var err error + // This is also usually the first read from a connection, + // so handle retries around socket timeouts. 
+ _, deadlineSet := ctx.Deadline() + for { + buf, err = t.reader.Peek(size32) + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout and we still have time, + // continue trying + continue + } + // For anything else, do not retry + break + } + if err != nil { + return err + } + + frameSize := binary.BigEndian.Uint32(buf) + if frameSize&VERSION_MASK == VERSION_1 { + t.clientType = clientUnframedBinary + return nil + } + if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { + t.clientType = clientUnframedCompact + return nil + } + + // At this point it should be a framed message, + // sanity check on frameSize then discard the peeked part. + if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + errors.New("frame too large"), + ) + } + t.reader.Discard(size32) + + // Read the frame fully into frameBuffer. + _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize)) + if err != nil { + return err + } + t.frameReader = ioutil.NopCloser(&t.frameBuffer) + + // Peek and handle the next 32 bits. + buf = t.frameBuffer.Bytes()[:size32] + version := binary.BigEndian.Uint32(buf) + if version&THeaderHeaderMask == THeaderHeaderMagic { + t.clientType = clientHeaders + return t.parseHeaders(ctx, frameSize) + } + if version&VERSION_MASK == VERSION_1 { + t.clientType = clientFramedBinary + return nil + } + if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { + t.clientType = clientFramedCompact + return nil + } + if err := t.endOfFrame(); err != nil { + return err + } + return NewTProtocolExceptionWithType( + NOT_IMPLEMENTED, + errors.New("unsupported client transport type"), + ) +} + +// endOfFrame does end of frame handling. +// +// It closes frameReader, and also resets frame related states. +func (t *THeaderTransport) endOfFrame() error { + defer func() { + t.frameBuffer.Reset() + t.frameReader = nil + }() + return t.frameReader.Close() +} + +func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { + if t.clientType != clientHeaders { + return nil + } + + var err error + var meta headerMeta + if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil { + return err + } + frameSize -= headerMetaSize + t.Flags = meta.MagicFlags & THeaderFlagsMask + t.SequenceID = meta.SequenceID + headerLength := int64(meta.HeaderLength) * 4 + if int64(frameSize) < headerLength { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + errors.New("header size is larger than the whole frame"), + ) + } + headerBuf := NewTMemoryBuffer() + _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength) + if err != nil { + return err + } + hp := NewTCompactProtocol(headerBuf) + hp.SetTConfiguration(t.cfg) + + // At this point the header is already read into headerBuf, + // and t.frameBuffer starts from the actual payload. 
+ protoID, err := hp.readVarint32() + if err != nil { + return err + } + t.protocolID = THeaderProtocolID(protoID) + + var transformCount int32 + transformCount, err = hp.readVarint32() + if err != nil { + return err + } + if transformCount > 0 { + reader := NewTransformReaderWithCapacity( + &t.frameBuffer, + int(transformCount), + ) + t.frameReader = reader + transformIDs := make([]THeaderTransformID, transformCount) + for i := 0; i < int(transformCount); i++ { + id, err := hp.readVarint32() + if err != nil { + return err + } + transformIDs[i] = THeaderTransformID(id) + } + // The transform IDs on the wire was added based on the order of + // writing, so on the reading side we need to reverse the order. + for i := transformCount - 1; i >= 0; i-- { + id := transformIDs[i] + if err := reader.AddTransform(id); err != nil { + return err + } + } + } + + // The info part does not use the transforms yet, so it's + // important to continue using headerBuf. + headers := make(THeaderMap) + for { + infoType, err := hp.readVarint32() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return err + } + if THeaderInfoType(infoType) == InfoKeyValue { + count, err := hp.readVarint32() + if err != nil { + return err + } + for i := 0; i < int(count); i++ { + key, err := hp.ReadString(ctx) + if err != nil { + return err + } + value, err := hp.ReadString(ctx) + if err != nil { + return err + } + headers[key] = value + } + } else { + // Skip reading info section on the first + // unsupported info type. + break + } + } + t.readHeaders = headers + + return nil +} + +func (t *THeaderTransport) needReadFrame() bool { + if t.clientType == clientUnknown { + // This is a new connection that's never read before. + return true + } + if t.isFramed() && t.frameReader == nil { + // We just finished the last frame. + return true + } + return false +} + +func (t *THeaderTransport) Read(p []byte) (read int, err error) { + // Here using context.Background instead of a context passed in is safe. + // First is that there's no way to pass context into this function. + // Then, 99% of the case when calling this Read frame is already read + // into frameReader. ReadFrame here is more of preventing bugs that + // didn't call ReadFrame before calling Read. + err = t.ReadFrame(context.Background()) + if err != nil { + return + } + if t.frameReader != nil { + read, err = t.frameReader.Read(p) + if err == nil && t.frameBuffer.Len() <= 0 { + // the last Read finished the frame, do endOfFrame + // handling here. + err = t.endOfFrame() + } else if err == io.EOF { + err = t.endOfFrame() + if err != nil { + return + } + if read == 0 { + // Try to read the next frame when we hit EOF + // (end of frame) immediately. + // When we got here, it means the last read + // finished the previous frame, but didn't + // do endOfFrame handling yet. + // We have to read the next frame here, + // as otherwise we would return 0 and nil, + // which is a case not handled well by most + // protocol implementations. + return t.Read(p) + } + } + return + } + return t.reader.Read(p) +} + +// Write writes data to the write buffer. +// +// You need to call Flush to actually write them to the transport. +func (t *THeaderTransport) Write(p []byte) (int, error) { + return t.writeBuffer.Write(p) +} + +// Flush writes the appropriate header and the write buffer to the underlying transport. 
+func (t *THeaderTransport) Flush(ctx context.Context) error { + if t.writeBuffer.Len() == 0 { + return nil + } + + defer t.writeBuffer.Reset() + + switch t.clientType { + default: + fallthrough + case clientUnknown: + t.clientType = clientHeaders + fallthrough + case clientHeaders: + headers := NewTMemoryBuffer() + hp := NewTCompactProtocol(headers) + hp.SetTConfiguration(t.cfg) + if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil { + return NewTTransportExceptionFromError(err) + } + for _, transform := range t.writeTransforms { + if _, err := hp.writeVarint32(int32(transform)); err != nil { + return NewTTransportExceptionFromError(err) + } + } + if len(t.writeHeaders) > 0 { + if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil { + return NewTTransportExceptionFromError(err) + } + for key, value := range t.writeHeaders { + if err := hp.WriteString(ctx, key); err != nil { + return NewTTransportExceptionFromError(err) + } + if err := hp.WriteString(ctx, value); err != nil { + return NewTTransportExceptionFromError(err) + } + } + } + padding := 4 - headers.Len()%4 + if padding < 4 { + buf := t.buffer[:padding] + for i := range buf { + buf[i] = 0 + } + if _, err := headers.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + } + + var payload bytes.Buffer + meta := headerMeta{ + MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask, + SequenceID: t.SequenceID, + HeaderLength: uint16(headers.Len() / 4), + } + if err := binary.Write(&payload, binary.BigEndian, meta); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := io.Copy(&payload, headers); err != nil { + return NewTTransportExceptionFromError(err) + } + + writer, err := NewTransformWriter(&payload, t.writeTransforms) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := io.Copy(writer, &t.writeBuffer); err != nil { + return NewTTransportExceptionFromError(err) + } + if err := writer.Close(); err != nil { + return NewTTransportExceptionFromError(err) + } + + // First write frame length + buf := t.buffer[:size32] + binary.BigEndian.PutUint32(buf, uint32(payload.Len())) + if _, err := t.transport.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + // Then write the payload + if _, err := io.Copy(t.transport, &payload); err != nil { + return NewTTransportExceptionFromError(err) + } + + case clientFramedBinary, clientFramedCompact: + buf := t.buffer[:size32] + binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len())) + if _, err := t.transport.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + fallthrough + case clientUnframedBinary, clientUnframedCompact: + if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil { + return NewTTransportExceptionFromError(err) + } + } + + select { + default: + case <-ctx.Done(): + return NewTTransportExceptionFromError(ctx.Err()) + } + + return t.transport.Flush(ctx) +} + +// Close closes the transport, along with its underlying transport. +func (t *THeaderTransport) Close() error { + if err := t.Flush(context.Background()); err != nil { + return err + } + return t.transport.Close() +} + +// RemainingBytes calls underlying transport's RemainingBytes. 
+// +// Even in framed cases, because of all the possible compression transforms +// involved, the remaining frame size is likely to be different from the actual +// remaining readable bytes, so we don't bother to keep tracking the remaining +// frame size by ourselves and just use the underlying transport's +// RemainingBytes directly. +func (t *THeaderTransport) RemainingBytes() uint64 { + return t.transport.RemainingBytes() +} + +// GetReadHeaders returns the THeaderMap read from transport. +func (t *THeaderTransport) GetReadHeaders() THeaderMap { + return t.readHeaders +} + +// SetWriteHeader sets a header for write. +func (t *THeaderTransport) SetWriteHeader(key, value string) { + t.writeHeaders[key] = value +} + +// ClearWriteHeaders clears all write headers previously set. +func (t *THeaderTransport) ClearWriteHeaders() { + t.writeHeaders = make(THeaderMap) +} + +// AddTransform add a transform for writing. +func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { + if !supportedTransformIDs[transform] { + return NewTProtocolExceptionWithType( + NOT_IMPLEMENTED, + fmt.Errorf("THeaderTransformID %d not supported", transform), + ) + } + t.writeTransforms = append(t.writeTransforms, transform) + return nil +} + +// Protocol returns the wrapped protocol id used in this THeaderTransport. +func (t *THeaderTransport) Protocol() THeaderProtocolID { + switch t.clientType { + default: + return t.protocolID + case clientFramedBinary, clientUnframedBinary: + return THeaderProtocolBinary + case clientFramedCompact, clientUnframedCompact: + return THeaderProtocolCompact + } +} + +func (t *THeaderTransport) isFramed() bool { + switch t.clientType { + default: + return false + case clientHeaders, clientFramedBinary, clientFramedCompact: + return true + } +} + +// SetTConfiguration implements TConfigurationSetter. +func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(t.transport, cfg) + t.cfg = cfg +} + +// THeaderTransportFactory is a TTransportFactory implementation to create +// THeaderTransport. +// +// It also implements TConfigurationSetter. +type THeaderTransportFactory struct { + // The underlying factory, could be nil. + Factory TTransportFactory + + cfg *TConfiguration +} + +// Deprecated: Use NewTHeaderTransportFactoryConf instead. +func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with +// the given *TConfiguration. +func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + return &THeaderTransportFactory{ + Factory: factory, + + cfg: conf, + } +} + +// GetTransport implements TTransportFactory. +func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if f.Factory != nil { + t, err := f.Factory.GetTransport(trans) + if err != nil { + return nil, err + } + return NewTHeaderTransportConf(t, f.cfg), nil + } + return NewTHeaderTransportConf(trans, f.cfg), nil +} + +// SetTConfiguration implements TConfigurationSetter. 
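Putting the factory and the transform support together: the sketch below obtains a THeaderTransport from the factory, enables zlib for outgoing frames, and tags messages with a header. The raw transport argument, header name, and import path are assumptions, not part of the vendored code:

    package headertransportexample

    import (
        "github.com/apache/thrift/lib/go/thrift"
    )

    // newCompressedHeaderTransport wraps sock (an opened raw transport) in
    // THeader, enabling zlib for writes and attaching a write header.
    func newCompressedHeaderTransport(sock thrift.TTransport) (*thrift.THeaderTransport, error) {
        factory := thrift.NewTHeaderTransportFactoryConf(nil, &thrift.TConfiguration{
            MaxFrameSize: 1 << 20,
        })
        trans, err := factory.GetTransport(sock)
        if err != nil {
            return nil, err
        }
        // The factory always wraps the result in a *THeaderTransport.
        ht := trans.(*thrift.THeaderTransport)
        if err := ht.AddTransform(thrift.TransformZlib); err != nil {
            return nil, err
        }
        ht.SetWriteHeader("content-encoding", "zlib")
        return ht, nil
    }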
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.Factory, f.cfg) + f.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*THeaderTransportFactory)(nil) + _ TConfigurationSetter = (*THeaderTransport)(nil) +) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go new file mode 100644 index 00000000000..15015864f0f --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +// Default to using the shared http client. Library users are +// free to change this global client or specify one through +// THttpClientOptions. +var DefaultHttpClient *http.Client = http.DefaultClient + +type THttpClient struct { + client *http.Client + response *http.Response + url *url.URL + requestBuffer *bytes.Buffer + header http.Header + nsecConnectTimeout int64 + nsecReadTimeout int64 +} + +type THttpClientTransportFactory struct { + options THttpClientOptions + url string +} + +func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*THttpClient) + if ok && t.url != nil { + return NewTHttpClientWithOptions(t.url.String(), p.options) + } + } + return NewTHttpClientWithOptions(p.url, p.options) +} + +type THttpClientOptions struct { + // If nil, DefaultHttpClient is used + Client *http.Client +} + +func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return &THttpClientTransportFactory{url: url, options: options} +} + +func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + parsedURL, err := url.Parse(urlstr) + if err != nil { + return nil, err + } + buf := make([]byte, 0, 1024) + client := options.Client + if client == nil { + client = DefaultHttpClient + } + httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} + return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil +} + +func NewTHttpClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} + +// Set the HTTP Header for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// 
httpTrans := trans.(THttpClient) +// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") +func (p *THttpClient) SetHeader(key string, value string) { + p.header.Add(key, value) +} + +// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// hdrValue := httpTrans.GetHeader("User-Agent") +func (p *THttpClient) GetHeader(key string) string { + return p.header.Get(key) +} + +// Deletes the HTTP Header given a Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.DelHeader("User-Agent") +func (p *THttpClient) DelHeader(key string) { + p.header.Del(key) +} + +func (p *THttpClient) Open() error { + // do nothing + return nil +} + +func (p *THttpClient) IsOpen() bool { + return p.response != nil || p.requestBuffer != nil +} + +func (p *THttpClient) closeResponse() error { + var err error + if p.response != nil && p.response.Body != nil { + // The docs specify that if keepalive is enabled and the response body is not + // read to completion the connection will never be returned to the pool and + // reused. Errors are being ignored here because if the connection is invalid + // and this fails for some reason, the Close() method will do any remaining + // cleanup. + io.Copy(ioutil.Discard, p.response.Body) + + err = p.response.Body.Close() + } + + p.response = nil + return err +} + +func (p *THttpClient) Close() error { + if p.requestBuffer != nil { + p.requestBuffer.Reset() + p.requestBuffer = nil + } + return p.closeResponse() +} + +func (p *THttpClient) Read(buf []byte) (int, error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + n, err := p.response.Body.Read(buf) + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { + return n, nil + } + return n, NewTTransportExceptionFromError(err) +} + +func (p *THttpClient) ReadByte() (c byte, err error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + return readByte(p.response.Body) +} + +func (p *THttpClient) Write(buf []byte) (int, error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.Write(buf) +} + +func (p *THttpClient) WriteByte(c byte) error { + if p.requestBuffer == nil { + return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.WriteByte(c) +} + +func (p *THttpClient) WriteString(s string) (n int, err error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.WriteString(s) +} + +func (p *THttpClient) Flush(ctx context.Context) error { + // Close any previous response body to avoid leaking connections. + p.closeResponse() + + // Give up the ownership of the current request buffer to http request, + // and create a new buffer for the next request. 
+ buf := p.requestBuffer + p.requestBuffer = new(bytes.Buffer) + req, err := http.NewRequest("POST", p.url.String(), buf) + if err != nil { + return NewTTransportExceptionFromError(err) + } + req.Header = p.header + if ctx != nil { + req = req.WithContext(ctx) + } + response, err := p.client.Do(req) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if response.StatusCode != http.StatusOK { + // Close the response to avoid leaking file descriptors. closeResponse does + // more than just call Close(), so temporarily assign it and reuse the logic. + p.response = response + p.closeResponse() + + // TODO(pomack) log bad response + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) + } + p.response = response + return nil +} + +func (p *THttpClient) RemainingBytes() (num_bytes uint64) { + len := p.response.ContentLength + if len >= 0 { + return uint64(len) + } + + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +// Deprecated: Use NewTHttpClientTransportFactory instead. +func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. +func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, options) +} + +// Deprecated: Use NewTHttpClientWithOptions instead. +func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, options) +} + +// Deprecated: Use NewTHttpClient instead. +func NewTHttpPostClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go new file mode 100644 index 00000000000..bc6922762ad --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "compress/gzip" + "io" + "net/http" + "strings" + "sync" +) + +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} + +// gz transparently compresses the HTTP response if the client supports it. +func gz(handler http.HandlerFunc) http.HandlerFunc { + sp := &sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } + + return func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + handler(w, r) + return + } + w.Header().Set("Content-Encoding", "gzip") + gz := sp.Get().(*gzip.Writer) + gz.Reset(w) + defer func() { + _ = gz.Close() + sp.Put(gz) + }() + gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} + handler(gzw, r) + } +} + +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w gzipResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go new file mode 100644 index 00000000000..1c477990fea --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bufio" + "context" + "io" +) + +// StreamTransport is a Transport made of an io.Reader and/or an io.Writer +type StreamTransport struct { + io.Reader + io.Writer + isReadWriter bool + closed bool +} + +type StreamTransportFactory struct { + Reader io.Reader + Writer io.Writer + isReadWriter bool +} + +func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*StreamTransport) + if ok { + if t.isReadWriter { + return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil + } + if t.Reader != nil && t.Writer != nil { + return NewStreamTransport(t.Reader, t.Writer), nil + } + if t.Reader != nil && t.Writer == nil { + return NewStreamTransportR(t.Reader), nil + } + if t.Reader == nil && t.Writer != nil { + return NewStreamTransportW(t.Writer), nil + } + return &StreamTransport{}, nil + } + } + if p.isReadWriter { + return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil + } + if p.Reader != nil && p.Writer != nil { + return NewStreamTransport(p.Reader, p.Writer), nil + } + if p.Reader != nil && p.Writer == nil { + return NewStreamTransportR(p.Reader), nil + } + if p.Reader == nil && p.Writer != nil { + return NewStreamTransportW(p.Writer), nil + } + return &StreamTransport{}, nil +} + +func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { + return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} +} + +func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportR(r io.Reader) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r)} +} + +func NewStreamTransportW(w io.Writer) *StreamTransport { + return &StreamTransport{Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { + bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) + return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} +} + +func (p *StreamTransport) IsOpen() bool { + return !p.closed +} + +// implicitly opened on creation, can't be reopened once closed +func (p *StreamTransport) Open() error { + if !p.closed { + return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") + } else { + return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") + } +} + +// Closes both the input and output streams. +func (p *StreamTransport) Close() error { + if p.closed { + return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") + } + p.closed = true + closedReader := false + if p.Reader != nil { + c, ok := p.Reader.(io.Closer) + if ok { + e := c.Close() + closedReader = true + if e != nil { + return e + } + } + p.Reader = nil + } + if p.Writer != nil && (!closedReader || !p.isReadWriter) { + c, ok := p.Writer.(io.Closer) + if ok { + e := c.Close() + if e != nil { + return e + } + } + p.Writer = nil + } + return nil +} + +// Flushes the underlying output stream if not null. 
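StreamTransport adapts any io.Reader/io.Writer pair into a TTransport, which is convenient for tests or for serializing into in-memory buffers. A minimal sketch that encodes a single string with the binary protocol into a bytes.Buffer, under the same import-path assumption as the earlier sketches:

    package streamexample

    import (
        "bytes"
        "context"

        "github.com/apache/thrift/lib/go/thrift"
    )

    // writeString encodes one string with the binary protocol and returns the
    // raw bytes, using a write-only StreamTransport over an in-memory buffer.
    func writeString(value string) ([]byte, error) {
        var out bytes.Buffer
        trans := thrift.NewStreamTransportW(&out)
        proto := thrift.NewTBinaryProtocolTransport(trans)

        ctx := context.Background()
        if err := proto.WriteString(ctx, value); err != nil {
            return nil, err
        }
        // Flush drains the buffered writer inside StreamTransport.
        if err := proto.Flush(ctx); err != nil {
            return nil, err
        }
        return out.Bytes(), nil
    }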
+func (p *StreamTransport) Flush(ctx context.Context) error { + if p.Writer == nil { + return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") + } + f, ok := p.Writer.(Flusher) + if ok { + err := f.Flush() + if err != nil { + return NewTTransportExceptionFromError(err) + } + } + return nil +} + +func (p *StreamTransport) Read(c []byte) (n int, err error) { + n, err = p.Reader.Read(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) ReadByte() (c byte, err error) { + f, ok := p.Reader.(io.ByteReader) + if ok { + c, err = f.ReadByte() + } else { + c, err = readByte(p.Reader) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) Write(c []byte) (n int, err error) { + n, err = p.Writer.Write(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteByte(c byte) (err error) { + f, ok := p.Writer.(io.ByteWriter) + if ok { + err = f.WriteByte(c) + } else { + err = writeByte(p.Writer, c) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteString(s string) (n int, err error) { + f, ok := p.Writer.(stringWriter) + if ok { + n, err = f.WriteString(s) + } else { + n, err = p.Writer.Write([]byte(s)) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.Reader, conf) + PropagateTConfiguration(p.Writer, conf) +} + +var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go new file mode 100644 index 00000000000..8e59d16cfda --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go @@ -0,0 +1,591 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "encoding/base64" + "fmt" +) + +const ( + THRIFT_JSON_PROTOCOL_VERSION = 1 +) + +// for references to _ParseContext see tsimplejson_protocol.go + +// JSON protocol implementation for thrift. 
+// Utilizes Simple JSON protocol +// +type TJSONProtocol struct { + *TSimpleJSONProtocol +} + +// Constructor +func NewTJSONProtocol(t TTransport) *TJSONProtocol { + v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) + return v +} + +// Factory +type TJSONProtocolFactory struct{} + +func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTJSONProtocol(trans) +} + +func NewTJSONProtocolFactory() *TJSONProtocolFactory { + return &TJSONProtocolFactory{} +} + +func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { + return e + } + if e := p.WriteString(ctx, name); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(ctx, seqId); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteI16(ctx, id); e != nil { + return e + } + if e := p.OutputObjectBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(typeId) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } + +func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(keyType) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + s, e1 = p.TypeIdToString(valueType) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + if e := p.WriteI64(ctx, int64(size)); e != nil { + return e + } + return p.OutputObjectBegin() +} + +func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { + if e := p.OutputObjectEnd(); e != nil { + return e + } + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { + if b { + return p.WriteI32(ctx, 1) + } + return p.WriteI32(ctx, 0) +} + +func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) +} + +func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, 
int32(v)) +} + +func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { + return p.OutputF64(v) +} + +func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { + return p.OutputString(v) +} + +func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. +func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + version, err := p.ReadI32(ctx) + if err != nil { + return name, typeId, seqId, err + } + if version != THRIFT_JSON_PROTOCOL_VERSION { + e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) + return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) + + } + if name, err = p.ReadString(ctx); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte(ctx) + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(ctx); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { + err := p.ParseListEnd() + return err +} + +func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { + b, _ := p.reader.Peek(1) + if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { + return "", STOP, -1, nil + } + fieldId, err := p.ReadI16(ctx) + if err != nil { + return "", STOP, fieldId, err + } + if _, err = p.ParseObjectStart(); err != nil { + return "", STOP, fieldId, err + } + sType, err := p.ReadString(ctx) + if err != nil { + return "", STOP, fieldId, err + } + fType, err := p.StringToTypeId(sType) + return "", fType, fieldId, err +} + +func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + sKeyType, e := p.ReadString(ctx) + if e != nil { + return keyType, 
valueType, size, e + } + keyType, e = p.StringToTypeId(sKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + sValueType, e := p.ReadString(ctx) + if e != nil { + return keyType, valueType, size, e + } + valueType, e = p.StringToTypeId(sValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, e := p.ReadI64(ctx) + if e != nil { + return keyType, valueType, size, e + } + size = int(iSize) + + _, e = p.ParseObjectStart() + return keyType, valueType, size, e +} + +func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error { + e := p.ParseObjectEnd() + if e != nil { + return e + } + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { + value, err := p.ReadI32(ctx) + return (value != 0), err +} + +func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) + return int8(v), err +} + +func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) + return int16(v), err +} + +func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) + return int32(v), err +} + +func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a 
JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { + err = p.writer.Flush() + if err == nil { + err = p.trans.Flush(ctx) + } + return NewTProtocolException(err) +} + +func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + // We don't really use the ctx in ReadString implementation, + // so this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. + sElemType, err := p.ReadString(context.Background()) + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err2 := p.ParseI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + // We don't really use the ctx in ReadString implementation, + // so this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. 
+ sElemType, err := p.ReadString(context.Background()) + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err2 := p.ParseI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { + switch byte(fieldType) { + case BOOL: + return "tf", nil + case BYTE: + return "i8", nil + case I16: + return "i16", nil + case I32: + return "i32", nil + case I64: + return "i64", nil + case DOUBLE: + return "dbl", nil + case STRING: + return "str", nil + case STRUCT: + return "rec", nil + case MAP: + return "map", nil + case SET: + return "set", nil + case LIST: + return "lst", nil + } + + e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { + switch fieldType { + case "tf": + return TType(BOOL), nil + case "i8": + return TType(BYTE), nil + case "i16": + return TType(I16), nil + case "i32": + return TType(I32), nil + case "i64": + return TType(I64), nil + case "dbl": + return TType(DOUBLE), nil + case "str": + return TType(STRING), nil + case "rec": + return TType(STRUCT), nil + case "map": + return TType(MAP), nil + case "set": + return TType(SET), nil + case "lst": + return TType(LIST), nil + } + + e := fmt.Errorf("Unknown type identifier: %s", fieldType) + return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go new file mode 100644 index 00000000000..c42aac998b7 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "log" + "os" + "testing" +) + +// Logger is a simple wrapper of a logging function. +// +// In reality the users might actually use different logging libraries, and they +// are not always compatible with each other. +// +// Logger is meant to be a simple common ground that it's easy to wrap whatever +// logging library they use into. +// +// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design +// discussion behind it. 
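+// Illustrative sketch (editor's addition, not part of the upstream Thrift
+// sources): any func(string) can serve as a Logger, for example one that
+// prefixes messages before handing them to the standard library logger. The
+// name prefixedLogger is hypothetical.
+//
+//	func prefixedLogger(prefix string) Logger {
+//		return func(msg string) {
+//			log.Printf("%s%s", prefix, msg)
+//		}
+//	}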
+type Logger func(msg string) + +// NopLogger is a Logger implementation that does nothing. +func NopLogger(msg string) {} + +// StdLogger wraps stdlib log package into a Logger. +// +// If logger passed in is nil, it will fallback to use stderr and default flags. +func StdLogger(logger *log.Logger) Logger { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + return func(msg string) { + logger.Print(msg) + } +} + +// TestLogger is a Logger implementation can be used in test codes. +// +// It fails the test when being called. +func TestLogger(tb testing.TB) Logger { + return func(msg string) { + tb.Errorf("logger called with msg: %q", msg) + } +} + +func fallbackLogger(logger Logger) Logger { + if logger == nil { + return StdLogger(nil) + } + return logger +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go new file mode 100644 index 00000000000..5936d273037 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" +) + +// Memory buffer-based implementation of the TTransport interface. 
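+// Illustrative sketch (editor's addition, not part of the upstream Thrift
+// sources): a TMemoryBuffer can back a protocol for in-memory serialization;
+// NewTBinaryProtocolTransport is defined elsewhere in this package.
+//
+//	buf := NewTMemoryBufferLen(1024)
+//	proto := NewTBinaryProtocolTransport(buf)
+//	_ = proto.WriteString(context.Background(), "hello")
+//	raw := buf.Bytes() // serialized bytes; Flush is a no-op for memory buffers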
+type TMemoryBuffer struct { + *bytes.Buffer + size int +} + +type TMemoryBufferTransportFactory struct { + size int +} + +func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*TMemoryBuffer) + if ok && t.size > 0 { + return NewTMemoryBufferLen(t.size), nil + } + } + return NewTMemoryBufferLen(p.size), nil +} + +func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { + return &TMemoryBufferTransportFactory{size: size} +} + +func NewTMemoryBuffer() *TMemoryBuffer { + return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} +} + +func NewTMemoryBufferLen(size int) *TMemoryBuffer { + buf := make([]byte, 0, size) + return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} +} + +func (p *TMemoryBuffer) IsOpen() bool { + return true +} + +func (p *TMemoryBuffer) Open() error { + return nil +} + +func (p *TMemoryBuffer) Close() error { + p.Buffer.Reset() + return nil +} + +// Flushing a memory buffer is a no-op +func (p *TMemoryBuffer) Flush(ctx context.Context) error { + return nil +} + +func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { + return uint64(p.Buffer.Len()) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go new file mode 100644 index 00000000000..25ab2e98a25 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Message type constants in the Thrift protocol. +type TMessageType int32 + +const ( + INVALID_TMESSAGE_TYPE TMessageType = 0 + CALL TMessageType = 1 + REPLY TMessageType = 2 + EXCEPTION TMessageType = 3 + ONEWAY TMessageType = 4 +) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go new file mode 100644 index 00000000000..8a788df02be --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the +// TProcessorFunctions for that TProcessor. +// +// Middlewares are passed in the name of the function as set in the processor +// map of the TProcessor. +type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction + +// WrapProcessor takes an existing TProcessor and wraps each of its inner +// TProcessorFunctions with the middlewares passed in and returns it. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. Middlewares[n] +func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { + for name, processorFunc := range processor.ProcessorMap() { + wrapped := processorFunc + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + wrapped = middlewares[i](name, wrapped) + } + processor.AddToProcessorMap(name, wrapped) + } + return processor +} + +// WrappedTProcessorFunction is a convenience struct that implements the +// TProcessorFunction interface that can be used when implementing custom +// Middleware. +type WrappedTProcessorFunction struct { + // Wrapped is called by WrappedTProcessorFunction.Process and should be a + // "wrapped" call to a base TProcessorFunc.Process call. + Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// Process implements the TProcessorFunction interface using p.Wrapped. +func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { + return p.Wrapped(ctx, seqID, in, out) +} + +// verify that WrappedTProcessorFunction implements TProcessorFunction +var ( + _ TProcessorFunction = WrappedTProcessorFunction{} + _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) +) + +// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls +// with custom middleware. +type ClientMiddleware func(TClient) TClient + +// WrappedTClient is a convenience struct that implements the TClient interface +// using inner Wrapped function. +// +// This is provided to aid in developing ClientMiddleware. +type WrappedTClient struct { + Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +// Call implements the TClient interface by calling and returning c.Wrapped. +func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + return c.Wrapped(ctx, method, args, result) +} + +// verify that WrappedTClient implements TClient +var ( + _ TClient = WrappedTClient{} + _ TClient = (*WrappedTClient)(nil) +) + +// WrapClient wraps the given TClient in the given middlewares. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. 
Middlewares[n] +func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + client = middlewares[i](client) + } + return client +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go new file mode 100644 index 00000000000..cacbf6bef3e --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" + "strings" +) + +/* +TMultiplexedProtocol is a protocol-independent concrete decorator +that allows a Thrift client to communicate with a multiplexing Thrift server, +by prepending the service name to the function name during function calls. + +NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request +from a multiplexing client. + +This example uses a single socket transport to invoke two services: + +socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) +transport := thrift.NewTFramedTransport(socket) +protocol := thrift.NewTBinaryProtocolTransport(transport) + +mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") +service := Calculator.NewCalculatorClient(mp) + +mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") +service2 := WeatherReport.NewWeatherReportClient(mp2) + +err := transport.Open() +if err != nil { + t.Fatal("Unable to open client socket", err) +} + +fmt.Println(service.Add(2,2)) +fmt.Println(service2.GetTemperature()) +*/ + +type TMultiplexedProtocol struct { + TProtocol + serviceName string +} + +const MULTIPLEXED_SEPARATOR = ":" + +func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { + return &TMultiplexedProtocol{ + TProtocol: protocol, + serviceName: serviceName, + } +} + +func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + if typeId == CALL || typeId == ONEWAY { + return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) + } else { + return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) + } +} + +/* +TMultiplexedProcessor is a TProcessor allowing +a single TServer to provide multiple services. 
+ +To do so, you instantiate the processor and then register additional +processors with it, as shown in the following example: + +var processor = thrift.NewTMultiplexedProcessor() + +firstProcessor := +processor.RegisterProcessor("FirstService", firstProcessor) + +processor.registerProcessor( + "Calculator", + Calculator.NewCalculatorProcessor(&CalculatorHandler{}), +) + +processor.registerProcessor( + "WeatherReport", + WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), +) + +serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) +if err != nil { + t.Fatal("Unable to create server socket", err) +} +server := thrift.NewTSimpleServer2(processor, serverTransport) +server.Serve(); +*/ + +type TMultiplexedProcessor struct { + serviceProcessorMap map[string]TProcessor + DefaultProcessor TProcessor +} + +func NewTMultiplexedProcessor() *TMultiplexedProcessor { + return &TMultiplexedProcessor{ + serviceProcessorMap: make(map[string]TProcessor), + } +} + +// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" +// to TProcessorFunction for any registered processors. If there is also a +// DefaultProcessor, the keys for the methods on that processor will simply be +// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and +// other registered processors, then the keys will be a mix of both formats. +// +// The implementation differs with other TProcessors in that the map returned is +// a new map, while most TProcessors just return their internal mapping directly. +// This means that edits to the map returned by this implementation of ProcessorMap +// will not affect the underlying mapping within the TMultiplexedProcessor. +func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { + processorFuncMap := make(map[string]TProcessorFunction) + for name, processor := range t.serviceProcessorMap { + for method, processorFunc := range processor.ProcessorMap() { + processorFuncName := name + MULTIPLEXED_SEPARATOR + method + processorFuncMap[processorFuncName] = processorFunc + } + } + if t.DefaultProcessor != nil { + for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { + processorFuncMap[method] = processorFunc + } + } + return processorFuncMap +} + +// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on +// the format of "name". +// +// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", +// then it sets the given TProcessorFunction on the inner TProcessor with the +// ProcessorName component using the FunctionName component. +// +// If "name" is just in the format "{FunctionName}", that is to say there is no +// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor +// configured, then it will set the given TProcessorFunction on the DefaultProcessor +// using the given name. +// +// If there is not a TProcessor available for the given name, then this function +// does nothing. This can happen when there is no TProcessor registered for +// the given ProcessorName or if all that is given is the FunctionName and there +// is no DefaultProcessor set. 
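+// Illustrative sketch (editor's addition, not part of the upstream Thrift
+// sources): with a processor registered under "Calculator", the two key forms
+// below are routed as described above; calcProcessor, fn and the method name
+// "add" are hypothetical.
+//
+//	mp := NewTMultiplexedProcessor()
+//	mp.RegisterProcessor("Calculator", calcProcessor)
+//	mp.AddToProcessorMap("Calculator:add", fn) // forwarded to calcProcessor under the key "add"
+//	mp.AddToProcessorMap("add", fn)            // no DefaultProcessor is set, so this is a no-op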
+func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) { + components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(components) != 2 { + if t.DefaultProcessor != nil && len(components) == 1 { + t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc) + } + return + } + processorName := components[0] + funcName := components[1] + if processor, ok := t.serviceProcessorMap[processorName]; ok { + processor.AddToProcessorMap(funcName, processorFunc) + } + +} + +// verify that TMultiplexedProcessor implements TProcessor +var _ TProcessor = (*TMultiplexedProcessor)(nil) + +func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { + t.DefaultProcessor = processor +} + +func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { + if t.serviceProcessorMap == nil { + t.serviceProcessorMap = make(map[string]TProcessor) + } + t.serviceProcessorMap[name] = processor +} + +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin(ctx) + if err != nil { + return false, NewTProtocolException(err) + } + if typeId != CALL && typeId != ONEWAY { + return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId)) + } + //extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, NewTProtocolException(fmt.Errorf( + "Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", + name, + )) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, NewTProtocolException(fmt.Errorf( + "Service name not found: %s. Did you forget to call registerProcessor()?", + v[0], + )) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} + +//Protocol that use stored message for ReadMessageBegin +type storedMessageProtocol struct { + TProtocol + name string + typeId TMessageType + seqid int32 +} + +func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { + return &storedMessageProtocol{protocol, name, typeId, seqid} +} + +func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + return s.name, s.typeId, s.seqid, nil +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go new file mode 100644 index 00000000000..e4512d204c0 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "math" + "strconv" +) + +type Numeric interface { + Int64() int64 + Int32() int32 + Int16() int16 + Byte() byte + Int() int + Float64() float64 + Float32() float32 + String() string + isNull() bool +} + +type numeric struct { + iValue int64 + dValue float64 + sValue string + isNil bool +} + +var ( + INFINITY Numeric + NEGATIVE_INFINITY Numeric + NAN Numeric + ZERO Numeric + NUMERIC_NULL Numeric +) + +func NewNumericFromDouble(dValue float64) Numeric { + if math.IsInf(dValue, 1) { + return INFINITY + } + if math.IsInf(dValue, -1) { + return NEGATIVE_INFINITY + } + if math.IsNaN(dValue) { + return NAN + } + iValue := int64(dValue) + sValue := strconv.FormatFloat(dValue, 'g', 10, 64) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI64(iValue int64) Numeric { + dValue := float64(iValue) + sValue := strconv.FormatInt(iValue, 10) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI32(iValue int32) Numeric { + dValue := float64(iValue) + sValue := strconv.FormatInt(int64(iValue), 10) + isNil := false + return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromString(sValue string) Numeric { + if sValue == INFINITY.String() { + return INFINITY + } + if sValue == NEGATIVE_INFINITY.String() { + return NEGATIVE_INFINITY + } + if sValue == NAN.String() { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + isNil := len(sValue) == 0 + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromJSONString(sValue string, isNull bool) Numeric { + if isNull { + return NewNullNumeric() + } + if sValue == JSON_INFINITY { + return INFINITY + } + if sValue == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY + } + if sValue == JSON_NAN { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} +} + +func NewNullNumeric() Numeric { + return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} +} + +func (p *numeric) Int64() int64 { + return p.iValue +} + +func (p *numeric) Int32() int32 { + return int32(p.iValue) +} + +func (p *numeric) Int16() int16 { + return int16(p.iValue) +} + +func (p *numeric) Byte() byte { + return byte(p.iValue) +} + +func (p *numeric) Int() int { + return int(p.iValue) +} + +func (p *numeric) Float64() float64 { + return p.dValue +} + +func (p *numeric) Float32() float32 { + return float32(p.dValue) +} + +func (p *numeric) String() string { + return p.sValue +} + +func (p *numeric) isNull() bool { + return p.isNil +} + +func init() { + INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} + NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} + NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} + 
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} + NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go new file mode 100644 index 00000000000..fb564ea8193 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +/////////////////////////////////////////////////////////////////////////////// +// This file is home to helpers that convert from various base types to +// respective pointer types. This is necessary because Go does not permit +// references to constants, nor can a pointer type to base type be allocated +// and initialized in a single expression. +// +// E.g., this is not allowed: +// +// var ip *int = &5 +// +// But this *is* allowed: +// +// func IntPtr(i int) *int { return &i } +// var ip *int = IntPtr(5) +// +// Since pointers to base types are commonplace as [optional] fields in +// exported thrift structs, we factor such helpers here. +/////////////////////////////////////////////////////////////////////////////// + +func Float32Ptr(v float32) *float32 { return &v } +func Float64Ptr(v float64) *float64 { return &v } +func IntPtr(v int) *int { return &v } +func Int8Ptr(v int8) *int8 { return &v } +func Int16Ptr(v int16) *int16 { return &v } +func Int32Ptr(v int32) *int32 { return &v } +func Int64Ptr(v int64) *int64 { return &v } +func StringPtr(v string) *string { return &v } +func Uint32Ptr(v uint32) *uint32 { return &v } +func Uint64Ptr(v uint64) *uint64 { return &v } +func BoolPtr(v bool) *bool { return &v } +func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go new file mode 100644 index 00000000000..245a3ccfc98 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. +type TProcessor interface { + Process(ctx context.Context, in, out TProtocol) (bool, TException) + + // ProcessorMap returns a map of thrift method names to TProcessorFunctions. + ProcessorMap() map[string]TProcessorFunction + + // AddToProcessorMap adds the given TProcessorFunction to the internal + // processor map at the given key. + // + // If one is already set at the given key, it will be replaced with the new + // TProcessorFunction. + AddToProcessorMap(string, TProcessorFunction) +} + +type TProcessorFunction interface { + Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// The default processor factory just returns a singleton +// instance. +type TProcessorFactory interface { + GetProcessor(trans TTransport) TProcessor +} + +type tProcessorFactory struct { + processor TProcessor +} + +func NewTProcessorFactory(p TProcessor) TProcessorFactory { + return &tProcessorFactory{processor: p} +} + +func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { + return p.processor +} + +/** + * The default processor factory just returns a singleton + * instance. + */ +type TProcessorFunctionFactory interface { + GetProcessorFunction(trans TTransport) TProcessorFunction +} + +type tProcessorFunctionFactory struct { + processor TProcessorFunction +} + +func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { + return &tProcessorFunctionFactory{processor: p} +} + +func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { + return p.processor +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go new file mode 100644 index 00000000000..0a69bd4162d --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "errors" + "fmt" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error + WriteMessageEnd(ctx context.Context) error + WriteStructBegin(ctx context.Context, name string) error + WriteStructEnd(ctx context.Context) error + WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error + WriteFieldEnd(ctx context.Context) error + WriteFieldStop(ctx context.Context) error + WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error + WriteMapEnd(ctx context.Context) error + WriteListBegin(ctx context.Context, elemType TType, size int) error + WriteListEnd(ctx context.Context) error + WriteSetBegin(ctx context.Context, elemType TType, size int) error + WriteSetEnd(ctx context.Context) error + WriteBool(ctx context.Context, value bool) error + WriteByte(ctx context.Context, value int8) error + WriteI16(ctx context.Context, value int16) error + WriteI32(ctx context.Context, value int32) error + WriteI64(ctx context.Context, value int64) error + WriteDouble(ctx context.Context, value float64) error + WriteString(ctx context.Context, value string) error + WriteBinary(ctx context.Context, value []byte) error + + ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd(ctx context.Context) error + ReadStructBegin(ctx context.Context) (name string, err error) + ReadStructEnd(ctx context.Context) error + ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) + ReadFieldEnd(ctx context.Context) error + ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) + ReadMapEnd(ctx context.Context) error + ReadListBegin(ctx context.Context) (elemType TType, size int, err error) + ReadListEnd(ctx context.Context) error + ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) + ReadSetEnd(ctx context.Context) error + ReadBool(ctx context.Context) (value bool, err error) + ReadByte(ctx context.Context) (value int8, err error) + ReadI16(ctx context.Context) (value int16, err error) + ReadI32(ctx context.Context) (value int32, err error) + ReadI64(ctx context.Context) (value int64, err error) + ReadDouble(ctx context.Context) (value float64, err error) + ReadString(ctx context.Context) (value string, err error) + ReadBinary(ctx context.Context) (value []byte, err error) + + Skip(ctx context.Context, fieldType TType) (err error) + Flush(ctx context.Context) (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. +func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { + return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) +} + +// Skips over the next data element from the provided input TProtocol object. 
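+// Illustrative sketch (editor's addition, not part of the upstream Thrift
+// sources): skipping a field a decoder does not recognize. prot, ctx and
+// known are hypothetical.
+//
+//	_, fieldType, id, err := prot.ReadFieldBegin(ctx)
+//	if err == nil && fieldType != STOP && !known(id) {
+//		err = SkipDefaultDepth(ctx, prot, fieldType) // recursion bounded by DEFAULT_RECURSION_DEPTH
+//	}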
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { + + if maxDepth <= 0 { + return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) + } + + switch fieldType { + case BOOL: + _, err = self.ReadBool(ctx) + return + case BYTE: + _, err = self.ReadByte(ctx) + return + case I16: + _, err = self.ReadI16(ctx) + return + case I32: + _, err = self.ReadI32(ctx) + return + case I64: + _, err = self.ReadI64(ctx) + return + case DOUBLE: + _, err = self.ReadDouble(ctx) + return + case STRING: + _, err = self.ReadString(ctx) + return + case STRUCT: + if _, err = self.ReadStructBegin(ctx); err != nil { + return err + } + for { + _, typeId, _, _ := self.ReadFieldBegin(ctx) + if typeId == STOP { + break + } + err := Skip(ctx, self, typeId, maxDepth-1) + if err != nil { + return err + } + self.ReadFieldEnd(ctx) + } + return self.ReadStructEnd(ctx) + case MAP: + keyType, valueType, size, err := self.ReadMapBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, keyType, maxDepth-1) + if err != nil { + return err + } + self.Skip(ctx, valueType) + } + return self.ReadMapEnd(ctx) + case SET: + elemType, size, err := self.ReadSetBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadSetEnd(ctx) + case LIST: + elemType, size, err := self.ReadListBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadListEnd(ctx) + default: + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) + } + return nil +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go new file mode 100644 index 00000000000..9dcf4bfd94c --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "encoding/base64" + "errors" +) + +// Thrift Protocol exception +type TProtocolException interface { + TException + TypeId() int +} + +const ( + UNKNOWN_PROTOCOL_EXCEPTION = 0 + INVALID_DATA = 1 + NEGATIVE_SIZE = 2 + SIZE_LIMIT = 3 + BAD_VERSION = 4 + NOT_IMPLEMENTED = 5 + DEPTH_LIMIT = 6 +) + +type tProtocolException struct { + typeId int + err error + msg string +} + +var _ TProtocolException = (*tProtocolException)(nil) + +func (tProtocolException) TExceptionType() TExceptionType { + return TExceptionTypeProtocol +} + +func (p *tProtocolException) TypeId() int { + return p.typeId +} + +func (p *tProtocolException) String() string { + return p.msg +} + +func (p *tProtocolException) Error() string { + return p.msg +} + +func (p *tProtocolException) Unwrap() error { + return p.err +} + +func NewTProtocolException(err error) TProtocolException { + if err == nil { + return nil + } + + if e, ok := err.(TProtocolException); ok { + return e + } + + if errors.As(err, new(base64.CorruptInputError)) { + return NewTProtocolExceptionWithType(INVALID_DATA, err) + } + + return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) +} + +func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { + if err == nil { + return nil + } + return &tProtocolException{ + typeId: errType, + err: err, + msg: err.Error(), + } +} + +func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { + return &tProtocolException{ + typeId: err.TypeId(), + err: err, + msg: prepend + err.Error(), + } +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go new file mode 100644 index 00000000000..c40f796d886 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory interface for constructing protocol instances. +type TProtocolFactory interface { + GetProtocol(trans TTransport) TProtocol +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go new file mode 100644 index 00000000000..d884c6ac6c4 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. +type responseHelperKey struct{} + +// TResponseHelper defines a object with a set of helper functions that can be +// retrieved from the context object passed into server handler functions. +// +// Use GetResponseHelper to retrieve the injected TResponseHelper implementation +// from the context object. +// +// The zero value of TResponseHelper is valid with all helper functions being +// no-op. +type TResponseHelper struct { + // THeader related functions + *THeaderResponseHelper +} + +// THeaderResponseHelper defines THeader related TResponseHelper functions. +// +// The zero value of *THeaderResponseHelper is valid with all helper functions +// being no-op. +type THeaderResponseHelper struct { + proto *THeaderProtocol +} + +// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the +// underlying TProtocol. +func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { + if hp, ok := proto.(*THeaderProtocol); ok { + return &THeaderResponseHelper{ + proto: hp, + } + } + return nil +} + +// SetHeader sets a response header. +// +// It's no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) SetHeader(key, value string) { + if h != nil && h.proto != nil { + h.proto.SetWriteHeader(key, value) + } +} + +// ClearHeaders clears all the response headers previously set. +// +// It's no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) ClearHeaders() { + if h != nil && h.proto != nil { + h.proto.ClearWriteHeaders() + } +} + +// GetResponseHelper retrieves the TResponseHelper implementation injected into +// the context object. +// +// If no helper was found in the context object, a nop helper with ok == false +// will be returned. +func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { + if v := ctx.Value(responseHelperKey{}); v != nil { + helper, ok = v.(TResponseHelper) + } + return +} + +// SetResponseHelper injects TResponseHelper into the context object. +func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { + return context.WithValue(ctx, responseHelperKey{}, helper) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go new file mode 100644 index 00000000000..83fdf29f5cb --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type RichTransport struct { + TTransport +} + +// Wraps Transport to provide TRichTransport interface +func NewTRichTransport(trans TTransport) *RichTransport { + return &RichTransport{trans} +} + +func (r *RichTransport) ReadByte() (c byte, err error) { + return readByte(r.TTransport) +} + +func (r *RichTransport) WriteByte(c byte) error { + return writeByte(r.TTransport, c) +} + +func (r *RichTransport) WriteString(s string) (n int, err error) { + return r.Write([]byte(s)) +} + +func (r *RichTransport) RemainingBytes() (num_bytes uint64) { + return r.TTransport.RemainingBytes() +} + +func readByte(r io.Reader) (c byte, err error) { + v := [1]byte{0} + n, err := r.Read(v[0:1]) + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { + return v[0], nil + } + if n > 0 && err != nil { + return v[0], err + } + if err != nil { + return 0, err + } + return v[0], nil +} + +func writeByte(w io.Writer, c byte) error { + v := [1]byte{c} + _, err := w.Write(v[0:1]) + return err +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go new file mode 100644 index 00000000000..c44979094c6 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
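A sketch of RichTransport wrapping an in-memory transport (NewTMemoryBufferLen appears in the serializer below); upstream import path assumed as before:

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	buf := thrift.NewTMemoryBufferLen(64)
	rich := thrift.NewTRichTransport(buf)

	// Byte- and string-level writes without manual slicing.
	_ = rich.WriteByte('!')
	_, _ = rich.WriteString("hello")

	b, _ := rich.ReadByte()
	fmt.Printf("%c, %d bytes remaining\n", b, rich.RemainingBytes()) // !, 5 bytes remaining
}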
+ */ + +package thrift + +import ( + "context" + "sync" +) + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(ctx context.Context, p TProtocol) error + Read(ctx context.Context, p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { + t.Transport.Reset() + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + if err = t.Transport.Flush(ctx); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { + t.Transport.Reset() + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + + if err = t.Transport.Flush(ctx); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) + return +} + +// TSerializerPool is the thread-safe version of TSerializer, it uses resource +// pool of TSerializer under the hood. +// +// It must be initialized with either NewTSerializerPool or +// NewTSerializerPoolSizeFactory. +type TSerializerPool struct { + pool sync.Pool +} + +// NewTSerializerPool creates a new TSerializerPool. +// +// NewTSerializer can be used as the arg here. +func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { + return &TSerializerPool{ + pool: sync.Pool{ + New: func() interface{} { + return f() + }, + }, + } +} + +// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given +// size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size. +func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { + return &TSerializerPool{ + pool: sync.Pool{ + New: func() interface{} { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } + }, + }, + } +} + +func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { + s := t.pool.Get().(*TSerializer) + defer t.pool.Put(s) + return s.WriteString(ctx, msg) +} + +func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { + s := t.pool.Get().(*TSerializer) + defer t.pool.Put(s) + return s.Write(ctx, msg) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go new file mode 100644 index 00000000000..f813fa3532c --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
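A sketch of the serializer pool defined above. The ping type is a hand-written, hypothetical stand-in for a thrift-compiler-generated struct; it only needs to satisfy TStruct:

package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

type ping struct{ Msg string }

func (x *ping) Write(ctx context.Context, p thrift.TProtocol) error {
	if err := p.WriteStructBegin(ctx, "ping"); err != nil {
		return err
	}
	if err := p.WriteFieldBegin(ctx, "msg", thrift.STRING, 1); err != nil {
		return err
	}
	if err := p.WriteString(ctx, x.Msg); err != nil {
		return err
	}
	if err := p.WriteFieldEnd(ctx); err != nil {
		return err
	}
	if err := p.WriteFieldStop(ctx); err != nil {
		return err
	}
	return p.WriteStructEnd(ctx)
}

func (x *ping) Read(ctx context.Context, p thrift.TProtocol) error { return nil }

func main() {
	// TSerializer itself is not safe for concurrent use; the pool makes sharing safe.
	pool := thrift.NewTSerializerPool(thrift.NewTSerializer)
	b, err := pool.Write(context.Background(), &ping{Msg: "hi"})
	fmt.Println(len(b), err)
}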
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TServer interface { + ProcessorFactory() TProcessorFactory + ServerTransport() TServerTransport + InputTransportFactory() TTransportFactory + OutputTransportFactory() TTransportFactory + InputProtocolFactory() TProtocolFactory + OutputProtocolFactory() TProtocolFactory + + // Starts the server + Serve() error + // Stops the server. This is optional on a per-implementation basis. Not + // all servers are required to be cleanly stoppable. + Stop() error +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go new file mode 100644 index 00000000000..7dd24ae3648 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net" + "sync" + "time" +) + +type TServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + + // Protects the interrupted value to make it thread safe. 
+ mu sync.RWMutex + interrupted bool +} + +func NewTServerSocket(listenAddr string) (*TServerSocket, error) { + return NewTServerSocketTimeout(listenAddr, 0) +} + +func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil +} + +// Creates a TServerSocket from a net.Addr +func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { + return &TServerSocket{addr: addr, clientTimeout: clientTimeout} +} + +func (p *TServerSocket) Listen() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return nil + } + l, err := net.Listen(p.addr.Network(), p.addr.String()) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TServerSocket) Accept() (TTransport, error) { + p.mu.RLock() + interrupted := p.interrupted + p.mu.RUnlock() + + if interrupted { + return nil, errTransportInterrupted + } + + p.mu.Lock() + listener := p.listener + p.mu.Unlock() + if listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + + conn, err := listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TServerSocket) Open() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } + return p.addr +} + +func (p *TServerSocket) Close() error { + var err error + p.mu.Lock() + if p.IsListening() { + err = p.listener.Close() + p.listener = nil + } + p.mu.Unlock() + return err +} + +func (p *TServerSocket) Interrupt() error { + p.mu.Lock() + p.interrupted = true + p.mu.Unlock() + p.Close() + + return nil +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go new file mode 100644 index 00000000000..51c40b64a1d --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Server transport. 
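A sketch of driving the TServerSocket above directly (TSimpleServer, later in this patch, does this wiring for you); upstream import path assumed:

package main

import (
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	sock, err := thrift.NewTServerSocket("localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	if err := sock.Listen(); err != nil {
		log.Fatal(err)
	}
	defer sock.Close()

	for {
		// Accept returns one TTransport per connection; Interrupt() called
		// from another goroutine makes subsequent Accept calls fail fast.
		client, err := sock.Accept()
		if err != nil {
			log.Println(err)
			return
		}
		go func(t thrift.TTransport) {
			defer t.Close()
			// hand the transport to a protocol/processor here
		}(client)
	}
}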
Object which provides client transports. +type TServerTransport interface { + Listen() error + Accept() (TTransport, error) + Close() error + + // Optional method implementation. This signals to the server transport + // that it should break out of any accept() or listen() that it is currently + // blocked on. This method, if implemented, MUST be thread safe, as it may + // be called from a different thread context than the other TServerTransport + // methods. + Interrupt() error +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go new file mode 100644 index 00000000000..d1a8154532d --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go @@ -0,0 +1,1373 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "strconv" +) + +type _ParseContext int + +const ( + _CONTEXT_INVALID _ParseContext = iota + _CONTEXT_IN_TOPLEVEL // 1 + _CONTEXT_IN_LIST_FIRST // 2 + _CONTEXT_IN_LIST // 3 + _CONTEXT_IN_OBJECT_FIRST // 4 + _CONTEXT_IN_OBJECT_NEXT_KEY // 5 + _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 +) + +func (p _ParseContext) String() string { + switch p { + case _CONTEXT_IN_TOPLEVEL: + return "TOPLEVEL" + case _CONTEXT_IN_LIST_FIRST: + return "LIST-FIRST" + case _CONTEXT_IN_LIST: + return "LIST" + case _CONTEXT_IN_OBJECT_FIRST: + return "OBJECT-FIRST" + case _CONTEXT_IN_OBJECT_NEXT_KEY: + return "OBJECT-NEXT-KEY" + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + return "OBJECT-NEXT-VALUE" + } + return "UNKNOWN-PARSE-CONTEXT" +} + +type jsonContextStack []_ParseContext + +func (s *jsonContextStack) push(v _ParseContext) { + *s = append(*s, v) +} + +func (s jsonContextStack) peek() (v _ParseContext, ok bool) { + l := len(s) + if l <= 0 { + return + } + return s[l-1], true +} + +func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { + l := len(*s) + if l <= 0 { + return + } + v = (*s)[l-1] + *s = (*s)[0 : l-1] + return v, true +} + +var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) + +// Simple JSON protocol implementation for thrift. +// +// This protocol produces/consumes a simple output format +// suitable for parsing by scripting languages. It should not be +// confused with the full-featured TJSONProtocol. 
+// +type TSimpleJSONProtocol struct { + trans TTransport + + parseContextStack jsonContextStack + dumpContext jsonContextStack + + writer *bufio.Writer + reader *bufio.Reader +} + +// Constructor +func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { + v := &TSimpleJSONProtocol{trans: t, + writer: bufio.NewWriter(t), + reader: bufio.NewReader(t), + } + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) + return v +} + +// Factory +type TSimpleJSONProtocolFactory struct{} + +func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTSimpleJSONProtocol(trans) +} + +func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { + return &TSimpleJSONProtocolFactory{} +} + +var ( + JSON_COMMA []byte + JSON_COLON []byte + JSON_LBRACE []byte + JSON_RBRACE []byte + JSON_LBRACKET []byte + JSON_RBRACKET []byte + JSON_QUOTE byte + JSON_QUOTE_BYTES []byte + JSON_NULL []byte + JSON_TRUE []byte + JSON_FALSE []byte + JSON_INFINITY string + JSON_NEGATIVE_INFINITY string + JSON_NAN string + JSON_INFINITY_BYTES []byte + JSON_NEGATIVE_INFINITY_BYTES []byte + JSON_NAN_BYTES []byte + json_nonbase_map_elem_bytes []byte +) + +func init() { + JSON_COMMA = []byte{','} + JSON_COLON = []byte{':'} + JSON_LBRACE = []byte{'{'} + JSON_RBRACE = []byte{'}'} + JSON_LBRACKET = []byte{'['} + JSON_RBRACKET = []byte{']'} + JSON_QUOTE = '"' + JSON_QUOTE_BYTES = []byte{'"'} + JSON_NULL = []byte{'n', 'u', 'l', 'l'} + JSON_TRUE = []byte{'t', 'r', 'u', 'e'} + JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} + JSON_INFINITY = "Infinity" + JSON_NEGATIVE_INFINITY = "-Infinity" + JSON_NAN = "NaN" + JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NAN_BYTES = []byte{'N', 'a', 'N'} + json_nonbase_map_elem_bytes = []byte{']', ',', '['} +} + +func jsonQuote(s string) string { + b, _ := json.Marshal(s) + s1 := string(b) + return s1 +} + +func jsonUnquote(s string) (string, bool) { + s1 := new(string) + err := json.Unmarshal([]byte(s), s1) + return *s1, err == nil +} + +func mismatch(expected, actual string) error { + return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) +} + +func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteString(ctx, name); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(ctx, seqId); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteString(ctx, name); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } + +func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType 
TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(keyType)); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(valueType)); e != nil { + return e + } + return p.WriteI32(ctx, int32(size)) +} + +func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { + return p.OutputBool(b) +} + +func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) +} + +func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, int32(v)) +} + +func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { + return p.OutputF64(v) +} + +func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { + return p.OutputString(v) +} + +func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. 
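A sketch of the write path above, emitting simple JSON into a TMemoryBuffer; per the type's documentation this output is meant for scripting-language consumers rather than for reading back into thrift:

package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	ctx := context.Background()
	buf := thrift.NewTMemoryBufferLen(256)
	p := thrift.NewTSimpleJSONProtocol(buf)

	_ = p.WriteStructBegin(ctx, "example")
	_ = p.WriteFieldBegin(ctx, "name", thrift.STRING, 1)
	_ = p.WriteString(ctx, "otel")
	_ = p.WriteFieldEnd(ctx)
	_ = p.WriteFieldBegin(ctx, "count", thrift.I32, 2)
	_ = p.WriteI32(ctx, 3)
	_ = p.WriteFieldEnd(ctx)
	_ = p.WriteFieldStop(ctx)
	_ = p.WriteStructEnd(ctx)
	_ = p.Flush(ctx)

	fmt.Println(buf.String()) // {"name":"otel","count":3}
}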
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + if name, err = p.ReadString(ctx); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte(ctx) + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(ctx); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { + if err := p.ParsePreValue(); err != nil { + return "", STOP, 0, err + } + b, _ := p.reader.Peek(1) + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return "", STOP, 0, nil + case JSON_QUOTE: + p.reader.ReadByte() + name, err := p.ParseStringBody() + // simplejson is not meant to be read back into thrift + // - see http://wiki.apache.org/thrift/ThriftUsageJava + // - use JSON instead + if err != nil { + return name, STOP, 0, err + } + return name, STOP, -1, p.ParsePostValue() + } + e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) + return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return "", STOP, 0, NewTProtocolException(io.EOF) +} + +func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + bKeyType, e := p.ReadByte(ctx) + keyType = TType(bKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + bValueType, e := p.ReadByte(ctx) + valueType = TType(bValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, err := p.ReadI64(ctx) + size = int(iSize) + return keyType, valueType, size, err +} + +func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { + var value bool + + if err := p.ParsePreValue(); err != nil { + return value, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 { + switch f[0] { + case JSON_TRUE[0]: + b := make([]byte, len(JSON_TRUE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_TRUE) { + value = true + } else { + e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) + 
return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_FALSE[0]: + b := make([]byte, len(JSON_FALSE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_FALSE) { + value = false + } else { + e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_NULL[0]: + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_NULL) { + value = false + } else { + e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + default: + e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + return value, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) + return int8(v), err +} + +func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) + return int16(v), err +} + +func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) + return int32(v), err +} + +func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { + return 
NewTProtocolException(p.writer.Flush()) +} + +func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TSimpleJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TSimpleJSONProtocol) OutputPreValue() error { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: + if _, e := p.write(JSON_COMMA); e != nil { + return NewTProtocolException(e) + } + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if _, e := p.write(JSON_COLON); e != nil { + return NewTProtocolException(e) + } + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputPostValue() error { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_LIST) + case _CONTEXT_IN_OBJECT_FIRST: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) + case _CONTEXT_IN_OBJECT_NEXT_KEY: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputBool(value bool) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if value { + v = string(JSON_TRUE) + } else { + v = string(JSON_FALSE) + } + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputNull() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_NULL); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputF64(value float64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if math.IsNaN(value) { + v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) + } else if math.IsInf(value, 1) { + v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) + } else if math.IsInf(value, -1) { + v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) + } else { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + v = strconv.FormatFloat(value, 'g', -1, 64) + switch cxt { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = string(JSON_QUOTE) + v + string(JSON_QUOTE) + } + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputI64(value int64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + v := strconv.FormatInt(value, 10) + switch cxt { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputString(s string) error { + if e := p.OutputPreValue(); e != nil { + return e + } + if e := p.OutputStringData(jsonQuote(s)); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputStringData(s string) error { + _, e := p.write([]byte(s)) + return 
NewTProtocolException(e) +} + +func (p *TSimpleJSONProtocol) OutputObjectBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) + return nil +} + +func (p *TSimpleJSONProtocol) OutputObjectEnd() error { + if _, e := p.write(JSON_RBRACE); e != nil { + return NewTProtocolException(e) + } + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputListBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) + return nil +} + +func (p *TSimpleJSONProtocol) OutputListEnd() error { + if _, e := p.write(JSON_RBRACKET); e != nil { + return NewTProtocolException(e) + } + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.OutputI64(int64(elemType)); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePreValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } + b, _ := p.reader.Peek(1) + switch cxt { + case _CONTEXT_IN_LIST: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACKET[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + case _CONTEXT_IN_OBJECT_NEXT_KEY: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if len(b) > 0 { + switch b[0] { + case JSON_COLON[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePostValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_LIST) + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.parseContextStack.pop() + 
p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) + } + return nil +} + +func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { + for { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return nil + } + switch b[0] { + case ' ', '\r', '\n', '\t': + p.reader.ReadByte() + continue + default: + break + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + l := len(line) + // count number of escapes to see if we need to keep going + i := 1 + for ; i < l; i++ { + if line[l-i-1] != '\\' { + break + } + } + if i&0x01 == 1 { + v, ok := jsonUnquote(string(JSON_QUOTE) + line) + if !ok { + return "", NewTProtocolException(err) + } + return v, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + str := string(JSON_QUOTE) + line + s + v, ok := jsonUnquote(str) + if !ok { + e := fmt.Errorf("Unable to parse as JSON string %s", str) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + l := len(line) + // count number of escapes to see if we need to keep going + i := 1 + for ; i < l; i++ { + if line[l-i-1] != '\\' { + break + } + } + if i&0x01 == 1 { + return line, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + v := line + s + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { + line, err := p.reader.ReadBytes(JSON_QUOTE) + if err != nil { + return line, NewTProtocolException(err) + } + line2 := line[0 : len(line)-1] + l := len(line2) + if (l % 4) != 0 { + pad := 4 - (l % 4) + fill := [...]byte{'=', '=', '='} + line2 = append(line2, fill[:pad]...) 
+ l = len(line2) + } + output := make([]byte, base64.StdEncoding.DecodedLen(l)) + n, err := base64.StdEncoding.Decode(output, line2) + return output[0:n], NewTProtocolException(err) +} + +func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value int64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Int64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value float64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Float64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { + if err := p.ParsePreValue(); err != nil { + return false, err + } + var b []byte + b, err := p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) > 0 && b[0] == JSON_LBRACE[0] { + p.reader.ReadByte() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) + return false, nil + } else if p.safePeekContains(JSON_NULL) { + return true, nil + } + e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) + return false, NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TSimpleJSONProtocol) ParseObjectEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt, _ := p.parseContextStack.peek() + if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { + e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACE[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', '}': + break + } + } + p.parseContextStack.pop() + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { + if e := p.ParsePreValue(); e != nil { + return false, e + } + var b []byte + b, err = p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { + p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) + p.reader.ReadByte() + isNull = false + } else if p.safePeekContains(JSON_NULL) { + isNull = true + } else { + err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) + } + return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) +} + +func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + bElemType, _, err := p.ParseI64() + elemType = TType(bElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err2 := p.ParseI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TSimpleJSONProtocol) ParseListEnd() error { + if isNull, err := 
p.readIfNull(); isNull || err != nil { + return err + } + cxt, _ := p.parseContextStack.peek() + if cxt != _CONTEXT_IN_LIST { + e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACKET[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): + break + } + } + p.parseContextStack.pop() + if cxt, ok := p.parseContextStack.peek(); !ok { + return errEmptyJSONContextStack + } else if cxt == _CONTEXT_IN_TOPLEVEL { + return nil + } + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { + e := p.readNonSignificantWhitespace() + if e != nil { + return nil, VOID, NewTProtocolException(e) + } + b, e := p.reader.Peek(1) + if len(b) > 0 { + c := b[0] + switch c { + case JSON_NULL[0]: + buf := make([]byte, len(JSON_NULL)) + _, e := p.reader.Read(buf) + if e != nil { + return nil, VOID, NewTProtocolException(e) + } + if string(JSON_NULL) != string(buf) { + e = mismatch(string(JSON_NULL), string(buf)) + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return nil, VOID, nil + case JSON_QUOTE: + p.reader.ReadByte() + v, e := p.ParseStringBody() + if e != nil { + return v, UTF8, NewTProtocolException(e) + } + if v == JSON_INFINITY { + return INFINITY, DOUBLE, nil + } else if v == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY, DOUBLE, nil + } else if v == JSON_NAN { + return NAN, DOUBLE, nil + } + return v, UTF8, nil + case JSON_TRUE[0]: + buf := make([]byte, len(JSON_TRUE)) + _, e := p.reader.Read(buf) + if e != nil { + return true, BOOL, NewTProtocolException(e) + } + if string(JSON_TRUE) != string(buf) { + e := mismatch(string(JSON_TRUE), string(buf)) + return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return true, BOOL, nil + case JSON_FALSE[0]: + buf := make([]byte, len(JSON_FALSE)) + _, e := p.reader.Read(buf) + if e != nil { + return false, BOOL, NewTProtocolException(e) + } + if string(JSON_FALSE) != string(buf) { + e := mismatch(string(JSON_FALSE), string(buf)) + return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return false, BOOL, nil + case JSON_LBRACKET[0]: + _, e := p.reader.ReadByte() + return make([]interface{}, 0), LIST, NewTProtocolException(e) + case JSON_LBRACE[0]: + _, e := p.reader.ReadByte() + return make(map[string]interface{}), STRUCT, NewTProtocolException(e) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: + // assume numeric + v, e := p.readNumeric() + return v, DOUBLE, e + default: + e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + e = fmt.Errorf("Cannot read a single element while parsing JSON.") + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + +} + +func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { + cont := true + for cont { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return false, nil + } + switch b[0] { + default: + return false, nil + case JSON_NULL[0]: + cont = false + break + case ' ', '\n', '\r', '\t': + p.reader.ReadByte() + break + } + } + if 
p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + return true, nil + } + return false, nil +} + +func (p *TSimpleJSONProtocol) readQuoteIfNext() { + b, _ := p.reader.Peek(1) + if len(b) > 0 && b[0] == JSON_QUOTE { + p.reader.ReadByte() + } +} + +func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { + isNull, err := p.readIfNull() + if isNull || err != nil { + return NUMERIC_NULL, err + } + hasDecimalPoint := false + nextCanBeSign := true + hasE := false + MAX_LEN := 40 + buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) + continueFor := true + inQuotes := false + for continueFor { + c, err := p.reader.ReadByte() + if err != nil { + if err == io.EOF { + break + } + return NUMERIC_NULL, NewTProtocolException(err) + } + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + buf.WriteByte(c) + nextCanBeSign = false + case '.': + if hasDecimalPoint { + e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if hasE { + e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasDecimalPoint, nextCanBeSign = true, false + case 'e', 'E': + if hasE { + e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasE, nextCanBeSign = true, true + case '-', '+': + if !nextCanBeSign { + e := fmt.Errorf("Negative sign within number") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + nextCanBeSign = false + case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: + p.reader.UnreadByte() + continueFor = false + case JSON_NAN[0]: + if buf.Len() == 0 { + buffer := make([]byte, len(JSON_NAN)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NAN != string(buffer) { + e := mismatch(JSON_NAN, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NAN, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_INFINITY[0]: + if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { + buffer := make([]byte, len(JSON_INFINITY)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_INFINITY != string(buffer) { + e := mismatch(JSON_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return INFINITY, nil + } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { + buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) + buffer[0] = JSON_NEGATIVE_INFINITY[0] + buffer[1] = c + _, e := p.reader.Read(buffer[2:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NEGATIVE_INFINITY != string(buffer) { + e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NEGATIVE_INFINITY, nil + } else { + e := 
fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_QUOTE: + if !inQuotes { + inQuotes = true + } else { + break + } + default: + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + if buf.Len() == 0 { + e := fmt.Errorf("Unable to parse number from empty string ''") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return NewNumericFromJSONString(buf.String(), false), nil +} + +// Safely peeks into the buffer, reading only what is necessary +func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { + for i := 0; i < len(b); i++ { + a, _ := p.reader.Peek(i + 1) + if len(a) < (i+1) || a[i] != b[i] { + return false + } + } + return true +} + +// Reset the context stack to its initial state. +func (p *TSimpleJSONProtocol) resetContextStack() { + p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} + p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} +} + +func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { + n, err := p.writer.Write(b) + if err != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + } + return n, err +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) +} + +var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go new file mode 100644 index 00000000000..563cbfc694a --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go @@ -0,0 +1,332 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" +) + +// ErrAbandonRequest is a special error server handler implementations can +// return to indicate that the request has been abandoned. +// +// TSimpleServer will check for this error, and close the client connection +// instead of writing the response/error back to the client. +// +// It shall only be used when the server handler implementation know that the +// client already abandoned the request (by checking that the passed in context +// is already canceled, for example). +var ErrAbandonRequest = errors.New("request abandoned") + +// ServerConnectivityCheckInterval defines the ticker interval used by +// connectivity check in thrift compiled TProcessorFunc implementations. 
+// +// It's defined as a variable instead of constant, so that thrift server +// implementations can change its value to control the behavior. +// +// If it's changed to <=0, the feature will be disabled. +var ServerConnectivityCheckInterval = time.Millisecond * 5 + +/* + * This is not a typical TSimpleServer as it is not blocked after accept a socket. + * It is more like a TThreadedServer that can handle different connections in different goroutines. + * This will work if golang user implements a conn-pool like thing in client side. + */ +type TSimpleServer struct { + closed int32 + wg sync.WaitGroup + mu sync.Mutex + + processorFactory TProcessorFactory + serverTransport TServerTransport + inputTransportFactory TTransportFactory + outputTransportFactory TTransportFactory + inputProtocolFactory TProtocolFactory + outputProtocolFactory TProtocolFactory + + // Headers to auto forward in THeaderProtocol + forwardHeaders []string + + logger Logger +} + +func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) +} + +func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory4(NewTProcessorFactory(processor), + serverTransport, + transportFactory, + protocolFactory, + ) +} + +func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(NewTProcessorFactory(processor), + serverTransport, + inputTransportFactory, + outputTransportFactory, + inputProtocolFactory, + outputProtocolFactory, + ) +} + +func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + NewTTransportFactory(), + NewTTransportFactory(), + NewTBinaryProtocolFactoryDefault(), + NewTBinaryProtocolFactoryDefault(), + ) +} + +func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + transportFactory, + transportFactory, + protocolFactory, + protocolFactory, + ) +} + +func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return &TSimpleServer{ + processorFactory: processorFactory, + serverTransport: serverTransport, + inputTransportFactory: inputTransportFactory, + outputTransportFactory: outputTransportFactory, + inputProtocolFactory: inputProtocolFactory, + outputProtocolFactory: outputProtocolFactory, + } +} + +func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { + return p.processorFactory +} + +func (p *TSimpleServer) ServerTransport() TServerTransport { + return p.serverTransport +} + +func (p *TSimpleServer) InputTransportFactory() TTransportFactory { + return p.inputTransportFactory +} + +func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { + return 
p.outputTransportFactory +} + +func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { + return p.inputProtocolFactory +} + +func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { + return p.outputProtocolFactory +} + +func (p *TSimpleServer) Listen() error { + return p.serverTransport.Listen() +} + +// SetForwardHeaders sets the list of header keys that will be auto forwarded +// while using THeaderProtocol. +// +// "forward" means that when the server is also a client to other upstream +// thrift servers, the context object user gets in the processor functions will +// have both read and write headers set, with write headers being forwarded. +// Users can always override the write headers by calling SetWriteHeaderList +// before calling thrift client functions. +func (p *TSimpleServer) SetForwardHeaders(headers []string) { + size := len(headers) + if size == 0 { + p.forwardHeaders = nil + return + } + + keys := make([]string, size) + copy(keys, headers) + p.forwardHeaders = keys +} + +// SetLogger sets the logger used by this TSimpleServer. +// +// If no logger was set before Serve is called, a default logger using standard +// log library will be used. +func (p *TSimpleServer) SetLogger(logger Logger) { + p.logger = logger +} + +func (p *TSimpleServer) innerAccept() (int32, error) { + client, err := p.serverTransport.Accept() + p.mu.Lock() + defer p.mu.Unlock() + closed := atomic.LoadInt32(&p.closed) + if closed != 0 { + return closed, nil + } + if err != nil { + return 0, err + } + if client != nil { + p.wg.Add(1) + go func() { + defer p.wg.Done() + if err := p.processRequests(client); err != nil { + p.logger(fmt.Sprintf("error processing request: %v", err)) + } + }() + } + return 0, nil +} + +func (p *TSimpleServer) AcceptLoop() error { + for { + closed, err := p.innerAccept() + if err != nil { + return err + } + if closed != 0 { + return nil + } + } +} + +func (p *TSimpleServer) Serve() error { + p.logger = fallbackLogger(p.logger) + + err := p.Listen() + if err != nil { + return err + } + p.AcceptLoop() + return nil +} + +func (p *TSimpleServer) Stop() error { + p.mu.Lock() + defer p.mu.Unlock() + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + atomic.StoreInt32(&p.closed, 1) + p.serverTransport.Interrupt() + p.wg.Wait() + return nil +} + +// If err is actually EOF, return nil, otherwise return err as-is. +func treatEOFErrorsAsNil(err error) error { + if err == nil { + return nil + } + if errors.Is(err, io.EOF) { + return nil + } + var te TTransportException + if errors.As(err, &te) && te.TypeId() == END_OF_FILE { + return nil + } + return err +} + +func (p *TSimpleServer) processRequests(client TTransport) (err error) { + defer func() { + err = treatEOFErrorsAsNil(err) + }() + + processor := p.processorFactory.GetProcessor(client) + inputTransport, err := p.inputTransportFactory.GetTransport(client) + if err != nil { + return err + } + inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) + var outputTransport TTransport + var outputProtocol TProtocol + + // for THeaderProtocol, we must use the same protocol instance for + // input and output so that the response is in the same dialect that + // the server detected the request was in. 
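A sketch of wiring the TSimpleServer above to a TServerSocket; the processor is left abstract because real processors come from thrift-compiler-generated code (e.g. a hypothetical NewMyServiceProcessor):

package main

import (
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// Must be replaced with a generated processor before serving.
	var processor thrift.TProcessor

	sock, err := thrift.NewTServerSocket("localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	server := thrift.NewTSimpleServer4(
		processor,
		sock,
		thrift.NewTTransportFactory(),
		thrift.NewTBinaryProtocolFactoryDefault(),
	)
	// Stop interrupts the accept loop and waits for in-flight requests.
	defer server.Stop()
	log.Fatal(server.Serve())
}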
+ headerProtocol, ok := inputProtocol.(*THeaderProtocol) + if ok { + outputProtocol = inputProtocol + } else { + oTrans, err := p.outputTransportFactory.GetTransport(client) + if err != nil { + return err + } + outputTransport = oTrans + outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) + } + + if inputTransport != nil { + defer inputTransport.Close() + } + if outputTransport != nil { + defer outputTransport.Close() + } + for { + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + + ctx := SetResponseHelper( + defaultCtx, + TResponseHelper{ + THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), + }, + ) + if headerProtocol != nil { + // We need to call ReadFrame here, otherwise we won't + // get any headers on the AddReadTHeaderToContext call. + // + // ReadFrame is safe to be called multiple times so it + // won't break when it's called again later when we + // actually start to read the message. + if err := headerProtocol.ReadFrame(ctx); err != nil { + return err + } + ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) + ctx = SetWriteHeaderList(ctx, p.forwardHeaders) + } + + ok, err := processor.Process(ctx, inputProtocol, outputProtocol) + if errors.Is(err, ErrAbandonRequest) { + return client.Close() + } + if errors.As(err, new(TTransportException)) && err != nil { + return err + } + var tae TApplicationException + if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { + continue + } + if !ok { + break + } + } + return nil +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go new file mode 100644 index 00000000000..e911bf16681 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "net" + "time" +) + +type TSocket struct { + conn *socketConn + addr net.Addr + cfg *TConfiguration + + connectTimeout time.Duration + socketTimeout time.Duration +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocket(hostPort string) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. 
+// +// Example: +// +// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", hostPort) + if err != nil { + return nil, err + } + return NewTSocketFromAddrConf(addr, conf), nil +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromAddrConf creates a TSocket from a net.Addr +func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { + return &TSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromAddrConf instead. +func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { + return NewTSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. +func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { + return &TSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromConnConf instead. +func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { + return NewTSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to set connect and socket timeouts. +func (p *TSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSocket) Open() error { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + var err error + if p.conn, err = createSocketConnFromReturn(net.DialTimeout( + p.addr.Network(), + p.addr.String(), + p.cfg.GetConnectTimeout(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +//Returns the remote address of the socket. +func (p *TSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go new file mode 100644 index 00000000000..c1cc30c6cc5 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net" +) + +// socketConn is a wrapped net.Conn that tries to do connectivity check. 
+type socketConn struct { + net.Conn + + buffer [1]byte +} + +var _ net.Conn = (*socketConn)(nil) + +// createSocketConnFromReturn is a language sugar to help create socketConn from +// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. +func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { + if err != nil { + return nil, err + } + return &socketConn{ + Conn: conn, + }, nil +} + +// wrapSocketConn wraps an existing net.Conn into *socketConn. +func wrapSocketConn(conn net.Conn) *socketConn { + // In case conn is already wrapped, + // return it as-is and avoid double wrapping. + if sc, ok := conn.(*socketConn); ok { + return sc + } + + return &socketConn{ + Conn: conn, + } +} + +// isValid checks whether there's a valid connection. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// It's the same as the previous implementation of TSocket.IsOpen and +// TSSLSocket.IsOpen before we added connectivity check. +func (sc *socketConn) isValid() bool { + return sc != nil && sc.Conn != nil +} + +// IsOpen checks whether the connection is open. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// Otherwise, it tries to do a connectivity check and returns the result. +// +// It also has the side effect of resetting the previously set read deadline on +// the socket. As a result, it shouldn't be called between setting read deadline +// and doing actual read. +func (sc *socketConn) IsOpen() bool { + if !sc.isValid() { + return false + } + return sc.checkConn() == nil +} + +// Read implements io.Reader. +// +// On Windows, it behaves the same as the underlying net.Conn.Read. +// +// On non-Windows, it treats len(p) == 0 as a connectivity check instead of +// readability check, which means instead of blocking until there's something to +// read (readability check), or always return (0, nil) (the default behavior of +// go's stdlib implementation on non-Windows), it never blocks, and will return +// an error if the connection is lost. +func (sc *socketConn) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, sc.read0() + } + + return sc.Conn.Read(p) +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go new file mode 100644 index 00000000000..f5fab3ab653 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go @@ -0,0 +1,83 @@ +// +build !windows + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "errors" + "io" + "syscall" + "time" +) + +// We rely on this variable to be the zero time, +// but define it as global variable to avoid repetitive allocations. +// Please DO NOT mutate this variable in any way. +var zeroTime time.Time + +func (sc *socketConn) read0() error { + return sc.checkConn() +} + +func (sc *socketConn) checkConn() error { + syscallConn, ok := sc.Conn.(syscall.Conn) + if !ok { + // No way to check, return nil + return nil + } + + // The reading about to be done here is non-blocking so we don't really + // need a read deadline. We just need to clear the previously set read + // deadline, if any. + sc.Conn.SetReadDeadline(zeroTime) + + rc, err := syscallConn.SyscallConn() + if err != nil { + return err + } + + var n int + + if readErr := rc.Read(func(fd uintptr) bool { + n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) + return true + }); readErr != nil { + return readErr + } + + if n > 0 { + // We got something, which means we are good + return nil + } + + if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { + // This means the connection is still open but we don't have + // anything to read right now. + return nil + } + + if err != nil { + return err + } + + // At this point, it means the other side already closed the connection. + return io.EOF +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go new file mode 100644 index 00000000000..679838c3b64 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go @@ -0,0 +1,34 @@ +// +build windows + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +func (sc *socketConn) read0() error { + // On windows, we fallback to the default behavior of reading 0 bytes. + var p []byte + _, err := sc.Conn.Read(p) + return err +} + +func (sc *socketConn) checkConn() error { + // On windows, we always return nil for this check. + return nil +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go new file mode 100644 index 00000000000..907afca326f --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "net" + "time" +) + +type TSSLServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + interrupted bool + cfg *tls.Config +} + +func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { + return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) +} + +func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil +} + +func (p *TSSLServerSocket) Listen() error { + if p.IsListening() { + return nil + } + l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TSSLServerSocket) Accept() (TTransport, error) { + if p.interrupted { + return nil, errTransportInterrupted + } + if p.listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + conn, err := p.listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TSSLServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSSLServerSocket) Open() error { + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TSSLServerSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSSLServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TSSLServerSocket) Interrupt() error { + p.interrupted = true + return nil +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go new file mode 100644 index 00000000000..6359a74ceb2 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "crypto/tls" + "net" + "time" +) + +type TSSLSocket struct { + conn *socketConn + // hostPort contains host:port (e.g. "asdf.com:12345"). The field is + // only valid if addr is nil. + hostPort string + // addr is nil when hostPort is not "", and is only used when the + // TSSLSocket is constructed from a net.Addr. + addr net.Addr + + cfg *TConfiguration +} + +// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) { + if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + return &TSSLSocket{ + hostPort: hostPort, + cfg: conf, + }, nil +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. +func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromAddrConf instead. +func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. +func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromConnConf instead. +func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to change connect and socket timeouts. 
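+// The TLS configuration carried by conf is also used for any subsequent dial.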
+func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSSLSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSSLSocket) Open() error { + var err error + // If we have a hostname, we need to pass the hostname to tls.Dial for + // certificate hostname checks. + if p.hostPort != "" { + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + "tcp", + p.hostPort, + p.cfg.GetTLSConfig(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } else { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + p.addr.Network(), + p.addr.String(), + p.cfg.GetTLSConfig(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSSLSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSSLSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSSLSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +func (p *TSSLSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. 
+ n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSSLSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSSLSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSSLSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSSLSocket)(nil) diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go new file mode 100644 index 00000000000..ba2738a8df1 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" + "io" +) + +var errTransportInterrupted = errors.New("Transport Interrupted") + +type Flusher interface { + Flush() (err error) +} + +type ContextFlusher interface { + Flush(ctx context.Context) (err error) +} + +type ReadSizeProvider interface { + RemainingBytes() (num_bytes uint64) +} + +// Encapsulates the I/O layer +type TTransport interface { + io.ReadWriteCloser + ContextFlusher + ReadSizeProvider + + // Opens the transport for communication + Open() error + + // Returns true if the transport is open + IsOpen() bool +} + +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// This is "enchanced" transport with extra capabilities. You need to use one of these +// to construct protocol. +// Notably, TSocket does not implement this interface, and it is always a mistake to use +// TSocket directly in protocol. +type TRichTransport interface { + io.ReadWriter + io.ByteReader + io.ByteWriter + stringWriter + ContextFlusher + ReadSizeProvider +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go new file mode 100644 index 00000000000..0a3f07646d3 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type timeoutable interface { + Timeout() bool +} + +// Thrift Transport exception +type TTransportException interface { + TException + TypeId() int + Err() error +} + +const ( + UNKNOWN_TRANSPORT_EXCEPTION = 0 + NOT_OPEN = 1 + ALREADY_OPEN = 2 + TIMED_OUT = 3 + END_OF_FILE = 4 +) + +type tTransportException struct { + typeId int + err error + msg string +} + +var _ TTransportException = (*tTransportException)(nil) + +func (tTransportException) TExceptionType() TExceptionType { + return TExceptionTypeTransport +} + +func (p *tTransportException) TypeId() int { + return p.typeId +} + +func (p *tTransportException) Error() string { + return p.msg +} + +func (p *tTransportException) Err() error { + return p.err +} + +func (p *tTransportException) Unwrap() error { + return p.err +} + +func (p *tTransportException) Timeout() bool { + return p.typeId == TIMED_OUT +} + +func NewTTransportException(t int, e string) TTransportException { + return &tTransportException{ + typeId: t, + err: errors.New(e), + msg: e, + } +} + +func NewTTransportExceptionFromError(e error) TTransportException { + if e == nil { + return nil + } + + if t, ok := e.(TTransportException); ok { + return t + } + + te := &tTransportException{ + typeId: UNKNOWN_TRANSPORT_EXCEPTION, + err: e, + msg: e.Error(), + } + + if isTimeoutError(e) { + te.typeId = TIMED_OUT + return te + } + + if errors.Is(e, io.EOF) { + te.typeId = END_OF_FILE + return te + } + + return te +} + +func prependTTransportException(prepend string, e TTransportException) TTransportException { + return &tTransportException{ + typeId: e.TypeId(), + err: e, + msg: prepend + e.Error(), + } +} + +// isTimeoutError returns true when err is an error caused by timeout. +// +// Note that this also includes TTransportException wrapped timeout errors. +func isTimeoutError(err error) bool { + var t timeoutable + if errors.As(err, &t) { + return t.Timeout() + } + return false +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go new file mode 100644 index 00000000000..c805807940a --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory class used to create wrapped instance of Transports. +// This is used primarily in servers, which get Transports from +// a ServerTransport and then may want to mutate them (i.e. create +// a BufferedTransport from the underlying base transport) +type TTransportFactory interface { + GetTransport(trans TTransport) (TTransport, error) +} + +type tTransportFactory struct{} + +// Return a wrapped instance of the base Transport. +func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return trans, nil +} + +func NewTTransportFactory() TTransportFactory { + return &tTransportFactory{} +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go new file mode 100644 index 00000000000..4292ffcadb1 --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Type constants in the Thrift protocol +type TType byte + +const ( + STOP = 0 + VOID = 1 + BOOL = 2 + BYTE = 3 + I08 = 3 + DOUBLE = 4 + I16 = 6 + I32 = 8 + I64 = 10 + STRING = 11 + UTF7 = 11 + STRUCT = 12 + MAP = 13 + SET = 14 + LIST = 15 + UTF8 = 16 + UTF16 = 17 + //BINARY = 18 wrong and unusued +) + +var typeNames = map[int]string{ + STOP: "STOP", + VOID: "VOID", + BOOL: "BOOL", + BYTE: "BYTE", + DOUBLE: "DOUBLE", + I16: "I16", + I32: "I32", + I64: "I64", + STRING: "STRING", + STRUCT: "STRUCT", + MAP: "MAP", + SET: "SET", + LIST: "LIST", + UTF8: "UTF8", + UTF16: "UTF16", +} + +func (p TType) String() string { + if s, ok := typeNames[int(p)]; ok { + return s + } + return "Unknown" +} diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go new file mode 100644 index 00000000000..259943a627c --- /dev/null +++ b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go @@ -0,0 +1,137 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. 
The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. + */ + +package thrift + +import ( + "compress/zlib" + "context" + "io" +) + +// TZlibTransportFactory is a factory for TZlibTransport instances +type TZlibTransportFactory struct { + level int + factory TTransportFactory +} + +// TZlibTransport is a TTransport implementation that makes use of zlib compression. +type TZlibTransport struct { + reader io.ReadCloser + transport TTransport + writer *zlib.Writer +} + +// GetTransport constructs a new instance of NewTZlibTransport +func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if p.factory != nil { + // wrap other factory + var err error + trans, err = p.factory.GetTransport(trans) + if err != nil { + return nil, err + } + } + return NewTZlibTransport(trans, p.level) +} + +// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory +func NewTZlibTransportFactory(level int) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: nil} +} + +// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory +// as a wrapper over existing transport factory +func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: factory} +} + +// NewTZlibTransport constructs a new instance of TZlibTransport +func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { + w, err := zlib.NewWriterLevel(trans, level) + if err != nil { + return nil, err + } + + return &TZlibTransport{ + writer: w, + transport: trans, + }, nil +} + +// Close closes the reader and writer (flushing any unwritten data) and closes +// the underlying transport. +func (z *TZlibTransport) Close() error { + if z.reader != nil { + if err := z.reader.Close(); err != nil { + return err + } + } + if err := z.writer.Close(); err != nil { + return err + } + return z.transport.Close() +} + +// Flush flushes the writer and its underlying transport. +func (z *TZlibTransport) Flush(ctx context.Context) error { + if err := z.writer.Flush(); err != nil { + return err + } + return z.transport.Flush(ctx) +} + +// IsOpen returns true if the transport is open +func (z *TZlibTransport) IsOpen() bool { + return z.transport.IsOpen() +} + +// Open opens the transport for communication +func (z *TZlibTransport) Open() error { + return z.transport.Open() +} + +func (z *TZlibTransport) Read(p []byte) (int, error) { + if z.reader == nil { + r, err := zlib.NewReader(z.transport) + if err != nil { + return 0, NewTTransportExceptionFromError(err) + } + z.reader = r + } + + return z.reader.Read(p) +} + +// RemainingBytes returns the size in bytes of the data that is still to be +// read. +func (z *TZlibTransport) RemainingBytes() uint64 { + return z.transport.RemainingBytes() +} + +func (z *TZlibTransport) Write(p []byte) (int, error) { + return z.writer.Write(p) +} + +// SetTConfiguration implements TConfigurationSetter for propagation. 
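+// The configuration is forwarded to the wrapped transport when that transport
+// also implements TConfigurationSetter; the zlib layer keeps no configuration
+// of its own.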
+func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(z.transport, conf) +} + +var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/exporters/jaeger/jaeger.go b/exporters/jaeger/jaeger.go new file mode 100644 index 00000000000..10ef350dc1d --- /dev/null +++ b/exporters/jaeger/jaeger.go @@ -0,0 +1,346 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + keyInstrumentationLibraryName = "otel.library.name" + keyInstrumentationLibraryVersion = "otel.library.version" + keyError = "error" + keySpanKind = "span.kind" + keyStatusCode = "otel.status_code" + keyStatusMessage = "otel.status_description" + keyDroppedAttributeCount = "otel.event.dropped_attributes_count" + keyEventName = "event" +) + +// New returns an OTel Exporter implementation that exports the collected +// spans to Jaeger. +func New(endpointOption EndpointOption) (*Exporter, error) { + uploader, err := endpointOption.newBatchUploader() + if err != nil { + return nil, err + } + + // Fetch default service.name from default resource for backup + var defaultServiceName string + defaultResource := resource.Default() + if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { + defaultServiceName = value.AsString() + } + if defaultServiceName == "" { + return nil, fmt.Errorf("failed to get service name from default resource") + } + + stopCh := make(chan struct{}) + e := &Exporter{ + uploader: uploader, + stopCh: stopCh, + defaultServiceName: defaultServiceName, + } + return e, nil +} + +// Exporter exports OpenTelemetry spans to a Jaeger agent or collector. +type Exporter struct { + uploader batchUploader + stopOnce sync.Once + stopCh chan struct{} + defaultServiceName string +} + +var _ sdktrace.SpanExporter = (*Exporter)(nil) + +// ExportSpans transforms and exports OpenTelemetry spans to Jaeger. +func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + // Return fast if context is already canceled or Exporter shutdown. + select { + case <-ctx.Done(): + return ctx.Err() + case <-e.stopCh: + return nil + default: + } + + // Cancel export if Exporter is shutdown. 
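+	// The goroutine below watches both the export context and stopCh so that
+	// a call to Shutdown cancels any upload that is still in flight.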
+ var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + go func(ctx context.Context, cancel context.CancelFunc) { + select { + case <-ctx.Done(): + case <-e.stopCh: + cancel() + } + }(ctx, cancel) + + for _, batch := range jaegerBatchList(spans, e.defaultServiceName) { + if err := e.uploader.upload(ctx, batch); err != nil { + return err + } + } + + return nil +} + +// Shutdown stops the Exporter. This will close all connections and release +// all resources held by the Exporter. +func (e *Exporter) Shutdown(ctx context.Context) error { + // Stop any active and subsequent exports. + e.stopOnce.Do(func() { close(e.stopCh) }) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return e.uploader.shutdown(ctx) +} + +func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span { + attr := ss.Attributes() + tags := make([]*gen.Tag, 0, len(attr)) + for _, kv := range attr { + tag := keyValueToTag(kv) + if tag != nil { + tags = append(tags, tag) + } + } + + if il := ss.InstrumentationLibrary(); il.Name != "" { + tags = append(tags, getStringTag(keyInstrumentationLibraryName, il.Name)) + if il.Version != "" { + tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, il.Version)) + } + } + + if ss.SpanKind() != trace.SpanKindInternal { + tags = append(tags, + getStringTag(keySpanKind, ss.SpanKind().String()), + ) + } + + if ss.Status().Code != codes.Unset { + tags = append(tags, getInt64Tag(keyStatusCode, int64(ss.Status().Code))) + if ss.Status().Description != "" { + tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description)) + } + + if ss.Status().Code == codes.Error { + tags = append(tags, getBoolTag(keyError, true)) + } + } + + var logs []*gen.Log + for _, a := range ss.Events() { + nTags := len(a.Attributes) + if a.Name != "" { + nTags++ + } + if a.DroppedAttributeCount != 0 { + nTags++ + } + fields := make([]*gen.Tag, 0, nTags) + if a.Name != "" { + // If an event contains an attribute with the same key, it needs + // to be given precedence and overwrite this. 
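+ // Appending the name tag before the attribute tags keeps any same-key
+ // attribute later in the list, preserving that precedence.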
+ fields = append(fields, getStringTag(keyEventName, a.Name)) + } + for _, kv := range a.Attributes { + tag := keyValueToTag(kv) + if tag != nil { + fields = append(fields, tag) + } + } + if a.DroppedAttributeCount != 0 { + fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount))) + } + logs = append(logs, &gen.Log{ + Timestamp: a.Time.UnixNano() / 1000, + Fields: fields, + }) + } + + var refs []*gen.SpanRef + for _, link := range ss.Links() { + tid := link.SpanContext.TraceID() + sid := link.SpanContext.SpanID() + refs = append(refs, &gen.SpanRef{ + TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), + SpanId: int64(binary.BigEndian.Uint64(sid[:])), + RefType: gen.SpanRefType_FOLLOWS_FROM, + }) + } + + tid := ss.SpanContext().TraceID() + sid := ss.SpanContext().SpanID() + psid := ss.Parent().SpanID() + return &gen.Span{ + TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), + SpanId: int64(binary.BigEndian.Uint64(sid[:])), + ParentSpanId: int64(binary.BigEndian.Uint64(psid[:])), + OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv" + Flags: int32(ss.SpanContext().TraceFlags()), + StartTime: ss.StartTime().UnixNano() / 1000, + Duration: ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000, + Tags: tags, + Logs: logs, + References: refs, + } +} + +func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag { + var tag *gen.Tag + switch keyValue.Value.Type() { + case attribute.STRING: + s := keyValue.Value.AsString() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VStr: &s, + VType: gen.TagType_STRING, + } + case attribute.BOOL: + b := keyValue.Value.AsBool() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VBool: &b, + VType: gen.TagType_BOOL, + } + case attribute.INT64: + i := keyValue.Value.AsInt64() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VLong: &i, + VType: gen.TagType_LONG, + } + case attribute.FLOAT64: + f := keyValue.Value.AsFloat64() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VDouble: &f, + VType: gen.TagType_DOUBLE, + } + case attribute.ARRAY: + json, _ := json.Marshal(keyValue.Value.AsArray()) + a := (string)(json) + tag = &gen.Tag{ + Key: string(keyValue.Key), + VStr: &a, + VType: gen.TagType_STRING, + } + } + return tag +} + +func getInt64Tag(k string, i int64) *gen.Tag { + return &gen.Tag{ + Key: k, + VLong: &i, + VType: gen.TagType_LONG, + } +} + +func getStringTag(k, s string) *gen.Tag { + return &gen.Tag{ + Key: k, + VStr: &s, + VType: gen.TagType_STRING, + } +} + +func getBoolTag(k string, b bool) *gen.Tag { + return &gen.Tag{ + Key: k, + VBool: &b, + VType: gen.TagType_BOOL, + } +} + +// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch. 
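+// Spans are grouped by their Resource: each distinct Resource yields one
+// Batch whose Process is derived from that Resource, falling back to the
+// default service name when the Resource carries none.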
+func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch { + if len(ssl) == 0 { + return nil + } + + batchDict := make(map[attribute.Distinct]*gen.Batch) + + for _, ss := range ssl { + if ss == nil { + continue + } + + resourceKey := ss.Resource().Equivalent() + batch, bOK := batchDict[resourceKey] + if !bOK { + batch = &gen.Batch{ + Process: process(ss.Resource(), defaultServiceName), + Spans: []*gen.Span{}, + } + } + batch.Spans = append(batch.Spans, spanToThrift(ss)) + batchDict[resourceKey] = batch + } + + // Transform the categorized map into a slice + batchList := make([]*gen.Batch, 0, len(batchDict)) + for _, batch := range batchDict { + batchList = append(batchList, batch) + } + return batchList +} + +// process transforms an OTel Resource into a jaeger Process. +func process(res *resource.Resource, defaultServiceName string) *gen.Process { + var process gen.Process + + var serviceName attribute.KeyValue + if res != nil { + for iter := res.Iter(); iter.Next(); { + if iter.Attribute().Key == semconv.ServiceNameKey { + serviceName = iter.Attribute() + // Don't convert service.name into tag. + continue + } + if tag := keyValueToTag(iter.Attribute()); tag != nil { + process.Tags = append(process.Tags, tag) + } + } + } + + // If no service.name is contained in a Span's Resource, + // that field MUST be populated from the default Resource. + if serviceName.Value.AsString() == "" { + serviceName = semconv.ServiceNameKey.String(defaultServiceName) + } + process.ServiceName = serviceName.Value.AsString() + + return &process +} diff --git a/exporters/jaeger/jaeger_benchmark_test.go b/exporters/jaeger/jaeger_benchmark_test.go new file mode 100644 index 00000000000..ab96ac8c9ed --- /dev/null +++ b/exporters/jaeger/jaeger_benchmark_test.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "context" + "fmt" + "testing" + "time" + + "go.opentelemetry.io/otel/sdk/instrumentation" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" +) + +var ( + traceID trace.TraceID + spanID trace.SpanID + spanContext trace.SpanContext + + instrLibName = "benchmark.tests" +) + +func init() { + var err error + traceID, err = trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10") + if err != nil { + panic(err) + } + spanID, err = trace.SpanIDFromHex("0102030405060708") + if err != nil { + panic(err) + } + spanContext = trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: spanID, + }) +} + +func spans(n int) []tracesdk.ReadOnlySpan { + now := time.Now() + s := make(tracetest.SpanStubs, n) + for i := 0; i < n; i++ { + name := fmt.Sprintf("span %d", i) + s[i] = tracetest.SpanStub{ + SpanContext: spanContext, + Name: name, + StartTime: now, + EndTime: now, + SpanKind: trace.SpanKindClient, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + }, + } + } + return s.Snapshots() +} + +func benchmarkExportSpans(b *testing.B, o EndpointOption, i int) { + ctx := context.Background() + s := spans(i) + exp, err := New(o) + if err != nil { + b.Fatal(err) + } + + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + if err := exp.ExportSpans(ctx, s); err != nil { + b.Error(err) + } + } +} + +func benchmarkCollector(b *testing.B, i int) { + benchmarkExportSpans(b, withTestCollectorEndpoint(), i) +} + +func benchmarkAgent(b *testing.B, i int) { + benchmarkExportSpans(b, WithAgentEndpoint(), i) +} + +func BenchmarkCollectorExportSpans1(b *testing.B) { benchmarkCollector(b, 1) } +func BenchmarkCollectorExportSpans10(b *testing.B) { benchmarkCollector(b, 10) } +func BenchmarkCollectorExportSpans100(b *testing.B) { benchmarkCollector(b, 100) } +func BenchmarkCollectorExportSpans1000(b *testing.B) { benchmarkCollector(b, 1000) } +func BenchmarkCollectorExportSpans10000(b *testing.B) { benchmarkCollector(b, 10000) } +func BenchmarkAgentExportSpans1(b *testing.B) { benchmarkAgent(b, 1) } +func BenchmarkAgentExportSpans10(b *testing.B) { benchmarkAgent(b, 10) } +func BenchmarkAgentExportSpans100(b *testing.B) { benchmarkAgent(b, 100) } + +/* +* BUG: These tests are not possible currently because the thrift payload size +* does not fit in a UDP packet with the default size (65000) and will return +* an error. + +func BenchmarkAgentExportSpans1000(b *testing.B) { benchmarkAgent(b, 1000) } +func BenchmarkAgentExportSpans10000(b *testing.B) { benchmarkAgent(b, 10000) } + +*/ diff --git a/exporters/jaeger/jaeger_test.go b/exporters/jaeger/jaeger_test.go new file mode 100644 index 00000000000..4db4a36ddef --- /dev/null +++ b/exporters/jaeger/jaeger_test.go @@ -0,0 +1,723 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "context" + "encoding/binary" + "fmt" + "os" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + ottest "go.opentelemetry.io/otel/internal/internaltest" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" +) + +func TestNewRawExporter(t *testing.T) { + testCases := []struct { + name string + endpoint EndpointOption + }{ + { + name: "default exporter with collector endpoint", + endpoint: WithCollectorEndpoint(), + }, + { + name: "default exporter with agent endpoint", + endpoint: WithAgentEndpoint(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := New(tc.endpoint) + assert.NoError(t, err) + }) + } +} + +func TestNewRawExporterUseEnvVarIfOptionUnset(t *testing.T) { + // Record and restore env + envStore := ottest.NewEnvStore() + envStore.Record(envEndpoint) + defer func() { + require.NoError(t, envStore.Restore()) + }() + + // If the user sets the environment variable OTEL_EXPORTER_JAEGER_ENDPOINT, endpoint will always get a value. + require.NoError(t, os.Unsetenv(envEndpoint)) + _, err := New( + WithCollectorEndpoint(), + ) + + assert.NoError(t, err) +} + +type testCollectorEndpoint struct { + batchesUploaded []*gen.Batch +} + +func (c *testCollectorEndpoint) shutdown(context.Context) error { + return nil +} + +func (c *testCollectorEndpoint) upload(_ context.Context, batch *gen.Batch) error { + c.batchesUploaded = append(c.batchesUploaded, batch) + return nil +} + +var _ batchUploader = (*testCollectorEndpoint)(nil) + +func withTestCollectorEndpoint() EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + return &testCollectorEndpoint{}, nil + }) +} + +func withTestCollectorEndpointInjected(ce *testCollectorEndpoint) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + return ce, nil + }) +} + +func TestExporterExportSpan(t *testing.T) { + const ( + serviceName = "test-service" + tagKey = "key" + tagVal = "val" + ) + + testCollector := &testCollectorEndpoint{} + exp, err := New(withTestCollectorEndpointInjected(testCollector)) + require.NoError(t, err) + tp := sdktrace.NewTracerProvider( + sdktrace.WithBatcher(exp), + sdktrace.WithResource(resource.NewSchemaless( + semconv.ServiceNameKey.String(serviceName), + attribute.String(tagKey, tagVal), + )), + ) + + tracer := tp.Tracer("test-tracer") + + ctx := context.Background() + for i := 0; i < 3; i++ { + _, span := tracer.Start(ctx, fmt.Sprintf("test-span-%d", i)) + span.End() + assert.True(t, span.SpanContext().IsValid()) + } + + require.NoError(t, tp.Shutdown(ctx)) + + batchesUploaded := testCollector.batchesUploaded + require.Len(t, batchesUploaded, 1) + uploadedBatch := batchesUploaded[0] + assert.Equal(t, serviceName, uploadedBatch.GetProcess().GetServiceName()) + assert.Len(t, uploadedBatch.GetSpans(), 3) + + require.Len(t, uploadedBatch.GetProcess().GetTags(), 1) + assert.Equal(t, tagKey, uploadedBatch.GetProcess().GetTags()[0].GetKey()) + assert.Equal(t, tagVal, uploadedBatch.GetProcess().GetTags()[0].GetVStr()) +} + +func Test_spanSnapshotToThrift(t 
*testing.T) { + now := time.Now() + traceID, _ := trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10") + spanID, _ := trace.SpanIDFromHex("0102030405060708") + parentSpanID, _ := trace.SpanIDFromHex("0807060504030201") + + linkTraceID, _ := trace.TraceIDFromHex("0102030405060709090a0b0c0d0e0f11") + linkSpanID, _ := trace.SpanIDFromHex("0102030405060709") + + eventNameValue := "event-test" + eventDropped := int64(10) + keyValue := "value" + statusCodeValue := int64(1) + doubleValue := 123.456 + intValue := int64(123) + boolTrue := true + arrValue := "[0,1,2,3]" + statusMessage := "this is a problem" + spanKind := "client" + rv1 := "rv11" + rv2 := int64(5) + instrLibName := "instrumentation-library" + instrLibVersion := "semver:1.0.0" + + tests := []struct { + name string + data tracetest.SpanStub + want *gen.Span + }{ + { + name: "no status description", + data: tracetest.SpanStub{ + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: spanID, + }), + Name: "/foo", + StartTime: now, + EndTime: now, + Status: sdktrace.Status{Code: codes.Error}, + SpanKind: trace.SpanKindClient, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + Version: instrLibVersion, + }, + }, + want: &gen.Span{ + TraceIdLow: 651345242494996240, + TraceIdHigh: 72623859790382856, + SpanId: 72623859790382856, + OperationName: "/foo", + StartTime: now.UnixNano() / 1000, + Duration: 0, + Tags: []*gen.Tag{ + {Key: keyError, VType: gen.TagType_BOOL, VBool: &boolTrue}, + {Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName}, + {Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion}, + {Key: keyStatusCode, VType: gen.TagType_LONG, VLong: &statusCodeValue}, + // Should not have a status message because it was unset + {Key: keySpanKind, VType: gen.TagType_STRING, VStr: &spanKind}, + }, + }, + }, + { + name: "no parent", + data: tracetest.SpanStub{ + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: spanID, + }), + Name: "/foo", + StartTime: now, + EndTime: now, + Links: []trace.Link{ + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: linkTraceID, + SpanID: linkSpanID, + }), + }, + }, + Attributes: []attribute.KeyValue{ + attribute.String("key", keyValue), + attribute.Float64("double", doubleValue), + attribute.Int64("int", intValue), + }, + Events: []sdktrace.Event{ + { + Name: eventNameValue, + Attributes: []attribute.KeyValue{attribute.String("k1", keyValue)}, + DroppedAttributeCount: int(eventDropped), + Time: now, + }, + }, + Status: sdktrace.Status{ + Code: codes.Error, + Description: statusMessage, + }, + SpanKind: trace.SpanKindClient, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + Version: instrLibVersion, + }, + }, + want: &gen.Span{ + TraceIdLow: 651345242494996240, + TraceIdHigh: 72623859790382856, + SpanId: 72623859790382856, + OperationName: "/foo", + StartTime: now.UnixNano() / 1000, + Duration: 0, + Tags: []*gen.Tag{ + {Key: "double", VType: gen.TagType_DOUBLE, VDouble: &doubleValue}, + {Key: "key", VType: gen.TagType_STRING, VStr: &keyValue}, + {Key: "int", VType: gen.TagType_LONG, VLong: &intValue}, + {Key: keyError, VType: gen.TagType_BOOL, VBool: &boolTrue}, + {Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName}, + {Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion}, + {Key: keyStatusCode, VType: gen.TagType_LONG, VLong: 
&statusCodeValue}, + {Key: keyStatusMessage, VType: gen.TagType_STRING, VStr: &statusMessage}, + {Key: keySpanKind, VType: gen.TagType_STRING, VStr: &spanKind}, + }, + References: []*gen.SpanRef{ + { + RefType: gen.SpanRefType_FOLLOWS_FROM, + TraceIdHigh: int64(binary.BigEndian.Uint64(linkTraceID[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(linkTraceID[8:16])), + SpanId: int64(binary.BigEndian.Uint64(linkSpanID[:])), + }, + }, + Logs: []*gen.Log{ + { + Timestamp: now.UnixNano() / 1000, + Fields: []*gen.Tag{ + { + Key: keyEventName, + VStr: &eventNameValue, + VType: gen.TagType_STRING, + }, + { + Key: "k1", + VStr: &keyValue, + VType: gen.TagType_STRING, + }, + { + Key: keyDroppedAttributeCount, + VLong: &eventDropped, + VType: gen.TagType_LONG, + }, + }, + }, + }, + }, + }, + { + name: "with parent", + data: tracetest.SpanStub{ + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: spanID, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: parentSpanID, + }), + Links: []trace.Link{ + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: linkTraceID, + SpanID: linkSpanID, + }), + }, + }, + Name: "/foo", + StartTime: now, + EndTime: now, + Attributes: []attribute.KeyValue{ + attribute.Array("arr", []int{0, 1, 2, 3}), + }, + Status: sdktrace.Status{ + Code: codes.Unset, + Description: statusMessage, + }, + SpanKind: trace.SpanKindInternal, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + Version: instrLibVersion, + }, + }, + want: &gen.Span{ + TraceIdLow: 651345242494996240, + TraceIdHigh: 72623859790382856, + SpanId: 72623859790382856, + ParentSpanId: 578437695752307201, + OperationName: "/foo", + StartTime: now.UnixNano() / 1000, + Duration: 0, + Tags: []*gen.Tag{ + // status code, message and span kind should NOT be populated + {Key: "arr", VType: gen.TagType_STRING, VStr: &arrValue}, + {Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName}, + {Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion}, + }, + References: []*gen.SpanRef{ + { + RefType: gen.SpanRefType_FOLLOWS_FROM, + TraceIdHigh: int64(binary.BigEndian.Uint64(linkTraceID[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(linkTraceID[8:16])), + SpanId: int64(binary.BigEndian.Uint64(linkSpanID[:])), + }, + }, + }, + }, + { + name: "resources do not affect the tags", + data: tracetest.SpanStub{ + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: spanID, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: parentSpanID, + }), + Name: "/foo", + StartTime: now, + EndTime: now, + Resource: resource.NewSchemaless( + attribute.String("rk1", rv1), + attribute.Int64("rk2", rv2), + semconv.ServiceNameKey.String("service name"), + ), + Status: sdktrace.Status{ + Code: codes.Unset, + Description: statusMessage, + }, + SpanKind: trace.SpanKindInternal, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + Version: instrLibVersion, + }, + }, + want: &gen.Span{ + TraceIdLow: 651345242494996240, + TraceIdHigh: 72623859790382856, + SpanId: 72623859790382856, + ParentSpanId: 578437695752307201, + OperationName: "/foo", + StartTime: now.UnixNano() / 1000, + Duration: 0, + Tags: []*gen.Tag{ + {Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName}, + {Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: 
&instrLibVersion}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := spanToThrift(tt.data.Snapshot()) + sort.Slice(got.Tags, func(i, j int) bool { + return got.Tags[i].Key < got.Tags[j].Key + }) + sort.Slice(tt.want.Tags, func(i, j int) bool { + return tt.want.Tags[i].Key < tt.want.Tags[j].Key + }) + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("Diff%v", diff) + } + }) + } +} + +func TestExporterShutdownHonorsCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + e, err := New(withTestCollectorEndpoint()) + require.NoError(t, err) + assert.EqualError(t, e.Shutdown(ctx), context.Canceled.Error()) +} + +func TestExporterShutdownHonorsTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + <-ctx.Done() + + e, err := New(withTestCollectorEndpoint()) + require.NoError(t, err) + assert.EqualError(t, e.Shutdown(ctx), context.DeadlineExceeded.Error()) + cancel() +} + +func TestErrorOnExportShutdownExporter(t *testing.T) { + e, err := New(withTestCollectorEndpoint()) + require.NoError(t, err) + assert.NoError(t, e.Shutdown(context.Background())) + assert.NoError(t, e.ExportSpans(context.Background(), nil)) +} + +func TestExporterExportSpansHonorsCancel(t *testing.T) { + e, err := New(withTestCollectorEndpoint()) + require.NoError(t, err) + now := time.Now() + ss := tracetest.SpanStubs{ + { + Name: "s1", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r1").String("v1"), + ), + StartTime: now, + EndTime: now, + }, + { + Name: "s2", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r2").String("v2"), + ), + StartTime: now, + EndTime: now, + }, + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + assert.EqualError(t, e.ExportSpans(ctx, ss.Snapshots()), context.Canceled.Error()) +} + +func TestExporterExportSpansHonorsTimeout(t *testing.T) { + e, err := New(withTestCollectorEndpoint()) + require.NoError(t, err) + now := time.Now() + ss := tracetest.SpanStubs{ + { + Name: "s1", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r1").String("v1"), + ), + StartTime: now, + EndTime: now, + }, + { + Name: "s2", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r2").String("v2"), + ), + StartTime: now, + EndTime: now, + }, + } + ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + defer cancel() + <-ctx.Done() + + assert.EqualError(t, e.ExportSpans(ctx, ss.Snapshots()), context.DeadlineExceeded.Error()) +} + +func TestJaegerBatchList(t *testing.T) { + newString := func(value string) *string { + return &value + } + spanKind := "unspecified" + now := time.Now() + + testCases := []struct { + name string + roSpans []sdktrace.ReadOnlySpan + defaultServiceName string + expectedBatchList []*gen.Batch + }{ + { + name: "no span shots", + roSpans: nil, + expectedBatchList: nil, + }, + { + name: "span's snapshot contains nil span", + roSpans: []sdktrace.ReadOnlySpan{ + tracetest.SpanStub{ + Name: "s1", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r1").String("v1"), + ), + StartTime: now, + EndTime: now, + }.Snapshot(), + nil, + }, + expectedBatchList: []*gen.Batch{ + { + Process: &gen.Process{ + ServiceName: "name", + Tags: []*gen.Tag{ + {Key: "r1", VType: gen.TagType_STRING, VStr: newString("v1")}, + }, + 
}, + Spans: []*gen.Span{ + { + OperationName: "s1", + Tags: []*gen.Tag{ + {Key: keySpanKind, VType: gen.TagType_STRING, VStr: &spanKind}, + }, + StartTime: now.UnixNano() / 1000, + }, + }, + }, + }, + }, + { + name: "merge spans that have the same resources", + roSpans: tracetest.SpanStubs{ + { + Name: "s1", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r1").String("v1"), + ), + StartTime: now, + EndTime: now, + }, + { + Name: "s2", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r1").String("v1"), + ), + StartTime: now, + EndTime: now, + }, + { + Name: "s3", + Resource: resource.NewSchemaless( + semconv.ServiceNameKey.String("name"), + attribute.Key("r2").String("v2"), + ), + StartTime: now, + EndTime: now, + }, + }.Snapshots(), + expectedBatchList: []*gen.Batch{ + { + Process: &gen.Process{ + ServiceName: "name", + Tags: []*gen.Tag{ + {Key: "r1", VType: gen.TagType_STRING, VStr: newString("v1")}, + }, + }, + Spans: []*gen.Span{ + { + OperationName: "s1", + Tags: []*gen.Tag{ + {Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind}, + }, + StartTime: now.UnixNano() / 1000, + }, + { + OperationName: "s2", + Tags: []*gen.Tag{ + {Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind}, + }, + StartTime: now.UnixNano() / 1000, + }, + }, + }, + { + Process: &gen.Process{ + ServiceName: "name", + Tags: []*gen.Tag{ + {Key: "r2", VType: gen.TagType_STRING, VStr: newString("v2")}, + }, + }, + Spans: []*gen.Span{ + { + OperationName: "s3", + Tags: []*gen.Tag{ + {Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind}, + }, + StartTime: now.UnixNano() / 1000, + }, + }, + }, + }, + }, + { + name: "no service name in spans", + roSpans: tracetest.SpanStubs{ + { + Name: "s1", + Resource: resource.NewSchemaless( + attribute.Key("r1").String("v1"), + ), + StartTime: now, + EndTime: now, + }, + }.Snapshots(), + defaultServiceName: "default service name", + expectedBatchList: []*gen.Batch{ + { + Process: &gen.Process{ + ServiceName: "default service name", + Tags: []*gen.Tag{ + {Key: "r1", VType: gen.TagType_STRING, VStr: newString("v1")}, + }, + }, + Spans: []*gen.Span{ + { + OperationName: "s1", + Tags: []*gen.Tag{ + {Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind}, + }, + StartTime: now.UnixNano() / 1000, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + batchList := jaegerBatchList(tc.roSpans, tc.defaultServiceName) + + assert.ElementsMatch(t, tc.expectedBatchList, batchList) + }) + } +} + +func TestProcess(t *testing.T) { + v1 := "v1" + + testCases := []struct { + name string + res *resource.Resource + defaultServiceName string + expectedProcess *gen.Process + }{ + { + name: "resources contain service name", + res: resource.NewSchemaless( + semconv.ServiceNameKey.String("service name"), + attribute.Key("r1").String("v1"), + ), + defaultServiceName: "default service name", + expectedProcess: &gen.Process{ + ServiceName: "service name", + Tags: []*gen.Tag{ + {Key: "r1", VType: gen.TagType_STRING, VStr: &v1}, + }, + }, + }, + { + name: "resources don't have service name", + res: resource.NewSchemaless(attribute.Key("r1").String("v1")), + defaultServiceName: "default service name", + expectedProcess: &gen.Process{ + ServiceName: "default service name", + Tags: []*gen.Tag{ + {Key: "r1", VType: gen.TagType_STRING, VStr: &v1}, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pro := 
process(tc.res, tc.defaultServiceName) + + assert.Equal(t, tc.expectedProcess, pro) + }) + } +} diff --git a/exporters/jaeger/reconnecting_udp_client.go b/exporters/jaeger/reconnecting_udp_client.go new file mode 100644 index 00000000000..ae4bf87f586 --- /dev/null +++ b/exporters/jaeger/reconnecting_udp_client.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" +) + +// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +type reconnectingUDPConn struct { + // `sync/atomic` expects the first word in an allocated struct to be 64-bit + // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details. + bufferBytes int64 + hostPort string + resolveFunc resolveFunc + dialFunc dialFunc + logger *log.Logger + + connMtx sync.RWMutex + conn *net.UDPConn + destAddr *net.UDPAddr + closeChan chan struct{} +} + +type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) +type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) + +// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger *log.Logger) (*reconnectingUDPConn, error) { + conn := &reconnectingUDPConn{ + hostPort: hostPort, + resolveFunc: resolveFunc, + dialFunc: dialFunc, + logger: logger, + closeChan: make(chan struct{}), + bufferBytes: int64(bufferBytes), + } + + if err := conn.attemptResolveAndDial(); err != nil { + conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout) + } + + go conn.reconnectLoop(resolveTimeout) + + return conn, nil +} + +func (c *reconnectingUDPConn) logf(format string, args ...interface{}) { + if c.logger != nil { + c.logger.Printf(format, args...) 
+ } +} + +func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { + ticker := time.NewTicker(resolveTimeout) + defer ticker.Stop() + + for { + select { + case <-c.closeChan: + return + case <-ticker.C: + if err := c.attemptResolveAndDial(); err != nil { + c.logf("%s", err.Error()) + } + } + } +} + +func (c *reconnectingUDPConn) attemptResolveAndDial() error { + newAddr, err := c.resolveFunc("udp", c.hostPort) + if err != nil { + return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) + } + + c.connMtx.RLock() + curAddr := c.destAddr + c.connMtx.RUnlock() + + // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn + if curAddr != nil && newAddr.String() == curAddr.String() { + return nil + } + + if err := c.attemptDialNewAddr(newAddr); err != nil { + return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) + } + + return nil +} + +func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { + connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) + if err != nil { + return err + } + + if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { + if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { + return err + } + } + + c.connMtx.Lock() + c.destAddr = newAddr + // store prev to close later + prevConn := c.conn + c.conn = connUDP + c.connMtx.Unlock() + + if prevConn != nil { + return prevConn.Close() + } + + return nil +} + +// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning +func (c *reconnectingUDPConn) Write(b []byte) (int, error) { + var bytesWritten int + var err error + + c.connMtx.RLock() + conn := c.conn + c.connMtx.RUnlock() + + if conn == nil { + // if connection is not initialized indicate this with err in order to hook into retry logic + err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") + } else { + bytesWritten, err = conn.Write(b) + } + + if err == nil { + return bytesWritten, nil + } + + // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again + if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { + c.connMtx.RLock() + conn := c.conn + c.connMtx.RUnlock() + + return conn.Write(b) + } + + // return original error if reconn fails + return bytesWritten, err +} + +// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation +func (c *reconnectingUDPConn) Close() error { + close(c.closeChan) + + // acquire rw lock before closing conn to ensure calls to Write drain + c.connMtx.Lock() + defer c.connMtx.Unlock() + + if c.conn != nil { + return c.conn.Close() + } + + return nil +} + +// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. 
if no conn is currently held +// and SetWriteBuffer is called store bufferBytes to be set for new conns +func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { + var err error + + c.connMtx.RLock() + conn := c.conn + c.connMtx.RUnlock() + + if conn != nil { + err = c.conn.SetWriteBuffer(bytes) + } + + if err == nil { + atomic.StoreInt64(&c.bufferBytes, int64(bytes)) + } + + return err +} diff --git a/exporters/jaeger/reconnecting_udp_client_test.go b/exporters/jaeger/reconnecting_udp_client_test.go new file mode 100644 index 00000000000..f21d4c13b0f --- /dev/null +++ b/exporters/jaeger/reconnecting_udp_client_test.go @@ -0,0 +1,498 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package jaeger + +import ( + "context" + "fmt" + "math/rand" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type mockResolver struct { + mock.Mock +} + +func (m *mockResolver) ResolveUDPAddr(network string, hostPort string) (*net.UDPAddr, error) { + args := m.Called(network, hostPort) + + a0 := args.Get(0) + if a0 == nil { + return (*net.UDPAddr)(nil), args.Error(1) + } + return a0.(*net.UDPAddr), args.Error(1) +} + +type mockDialer struct { + mock.Mock +} + +func (m *mockDialer) DialUDP(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) { + args := m.Called(network, laddr, raddr) + + a0 := args.Get(0) + if a0 == nil { + return (*net.UDPConn)(nil), args.Error(1) + } + + return a0.(*net.UDPConn), args.Error(1) +} + +func newUDPListener() (net.PacketConn, error) { + return net.ListenPacket("udp", "127.0.0.1:0") +} + +func newUDPConn() (net.PacketConn, *net.UDPConn, error) { + mockServer, err := newUDPListener() + if err != nil { + return nil, nil, err + } + + addr, err := net.ResolveUDPAddr("udp", mockServer.LocalAddr().String()) + if err != nil { + mockServer.Close() + return nil, nil, err + } + + conn, err := net.DialUDP("udp", nil, addr) + if err != nil { + mockServer.Close() + return nil, nil, err + } + + return mockServer, conn, nil +} + +func assertConnWritable(t *testing.T, conn udpConn, serverConn net.PacketConn) { + expectedString := "yo this is a test" + _, err := conn.Write([]byte(expectedString)) + require.NoError(t, err) + + var buf = make([]byte, len(expectedString)) + err = serverConn.SetReadDeadline(time.Now().Add(time.Second)) + require.NoError(t, err) + + _, _, err = serverConn.ReadFrom(buf) + require.NoError(t, err) + require.Equal(t, []byte(expectedString), buf) +} + +func waitForCallWithTimeout(call *mock.Call) bool { + called := make(chan struct{}) + call.Run(func(args mock.Arguments) { + if !isChannelClosed(called) { + close(called) + } + }) + + var wasCalled bool + // wait at most 100 milliseconds for the second call of ResolveUDPAddr that is supposed to fail + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + select { + case <-called: + wasCalled = true + case <-ctx.Done(): + 
fmt.Println("timed out") + } + cancel() + + return wasCalled +} + +func isChannelClosed(ch <-chan struct{}) bool { + select { + case <-ch: + return true + default: + } + return false +} + +func waitForConnCondition(conn *reconnectingUDPConn, condition func(conn *reconnectingUDPConn) bool) bool { + var conditionVal bool + for i := 0; i < 10; i++ { + conn.connMtx.RLock() + conditionVal = condition(conn) + conn.connMtx.RUnlock() + if conditionVal || i >= 9 { + break + } + + time.Sleep(time.Millisecond * 10) + } + + return conditionVal +} + +func newMockUDPAddr(t *testing.T, port int) *net.UDPAddr { + var buf = make([]byte, 4) + // random is not seeded to ensure tests are deterministic (also doesnt matter if ip is valid) + _, err := rand.Read(buf) + require.NoError(t, err) + + return &net.UDPAddr{ + IP: net.IPv4(buf[0], buf[1], buf[2], buf[3]), + Port: port, + } +} + +func TestNewResolvedUDPConn(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr := newMockUDPAddr(t, 34322) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr, nil). + Once() + + dialer := mockDialer{} + dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr). + Return(clientConn, nil). + Once() + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Hour, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + err = conn.Close() + assert.NoError(t, err) + + // assert the actual connection was closed + assert.Error(t, clientConn.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnWrites(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr := newMockUDPAddr(t, 34322) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr, nil). + Once() + + dialer := mockDialer{} + dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr). + Return(clientConn, nil). + Once() + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Hour, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + assertConnWritable(t, conn, mockServer) + + err = conn.Close() + assert.NoError(t, err) + + // assert the actual connection was closed + assert.Error(t, clientConn.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnEventuallyDials(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr := newMockUDPAddr(t, 34322) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(nil, fmt.Errorf("failed to resolve")).Once(). + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr, nil) + + dialer := mockDialer{} + dialCall := dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr). 
+ Return(clientConn, nil).Once() + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + err = conn.SetWriteBuffer(udpPacketMaxLength) + assert.NoError(t, err) + + wasCalled := waitForCallWithTimeout(dialCall) + assert.True(t, wasCalled) + + connEstablished := waitForConnCondition(conn, func(conn *reconnectingUDPConn) bool { + return conn.conn != nil + }) + + assert.True(t, connEstablished) + + assertConnWritable(t, conn, mockServer) + assertSockBufferSize(t, udpPacketMaxLength, clientConn) + + err = conn.Close() + assert.NoError(t, err) + + // assert the actual connection was closed + assert.Error(t, clientConn.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnNoSwapIfFail(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr := newMockUDPAddr(t, 34322) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr, nil).Once() + + failCall := resolver.On("ResolveUDPAddr", "udp", hostPort). + Return(nil, fmt.Errorf("resolve failed")) + + dialer := mockDialer{} + dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr). + Return(clientConn, nil).Once() + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + wasCalled := waitForCallWithTimeout(failCall) + + assert.True(t, wasCalled) + + assertConnWritable(t, conn, mockServer) + + err = conn.Close() + assert.NoError(t, err) + + // assert the actual connection was closed + assert.Error(t, clientConn.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnWriteRetry(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr := newMockUDPAddr(t, 34322) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(nil, fmt.Errorf("failed to resolve")).Once(). + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr, nil).Once() + + dialer := mockDialer{} + dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr). + Return(clientConn, nil).Once() + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + err = conn.SetWriteBuffer(udpPacketMaxLength) + assert.NoError(t, err) + + assertConnWritable(t, conn, mockServer) + assertSockBufferSize(t, udpPacketMaxLength, clientConn) + + err = conn.Close() + assert.NoError(t, err) + + // assert the actual connection was closed + assert.Error(t, clientConn.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnWriteRetryFails(t *testing.T) { + hostPort := "blahblah:34322" + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). 
+ Return(nil, fmt.Errorf("failed to resolve")).Twice() + + dialer := mockDialer{} + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + err = conn.SetWriteBuffer(udpPacketMaxLength) + assert.NoError(t, err) + + _, err = conn.Write([]byte("yo this is a test")) + + assert.Error(t, err) + + err = conn.Close() + assert.NoError(t, err) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnChanges(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr1 := newMockUDPAddr(t, 34322) + + mockServer2, clientConn2, err := newUDPConn() + require.NoError(t, err) + defer mockServer2.Close() + + mockUDPAddr2 := newMockUDPAddr(t, 34322) + + // ensure address doesn't duplicate mockUDPAddr1 + for i := 0; i < 10 && mockUDPAddr2.IP.Equal(mockUDPAddr1.IP); i++ { + mockUDPAddr2 = newMockUDPAddr(t, 34322) + } + + // this is really unlikely to ever fail the test, but its here as a safeguard + require.False(t, mockUDPAddr2.IP.Equal(mockUDPAddr1.IP)) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr1, nil).Once(). + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr2, nil) + + dialer := mockDialer{} + dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr1). + Return(clientConn, nil).Once() + + secondDial := dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr2). + Return(clientConn2, nil).Once() + + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + + err = conn.SetWriteBuffer(udpPacketMaxLength) + assert.NoError(t, err) + + wasCalled := waitForCallWithTimeout(secondDial) + assert.True(t, wasCalled) + + connSwapped := waitForConnCondition(conn, func(conn *reconnectingUDPConn) bool { + return conn.conn == clientConn2 + }) + + assert.True(t, connSwapped) + + assertConnWritable(t, conn, mockServer2) + assertSockBufferSize(t, udpPacketMaxLength, clientConn2) + + err = conn.Close() + assert.NoError(t, err) + + // assert the prev connection was closed + assert.Error(t, clientConn.Close()) + + // assert the actual connection was closed + assert.Error(t, clientConn2.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} + +func TestResolvedUDPConnLoopWithoutChanges(t *testing.T) { + hostPort := "blahblah:34322" + + mockServer, clientConn, err := newUDPConn() + require.NoError(t, err) + defer mockServer.Close() + + mockUDPAddr := newMockUDPAddr(t, 34322) + + resolver := mockResolver{} + resolver. + On("ResolveUDPAddr", "udp", hostPort). + Return(mockUDPAddr, nil) + + dialer := mockDialer{} + dialer. + On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr). + Return(clientConn, nil). 
+ Once() + + resolveTimeout := 500 * time.Millisecond + conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, resolveTimeout, resolver.ResolveUDPAddr, dialer.DialUDP, nil) + assert.NoError(t, err) + require.NotNil(t, conn) + assert.Equal(t, mockUDPAddr, conn.destAddr) + + // Waiting for one round of loop + time.Sleep(3 * resolveTimeout) + assert.Equal(t, mockUDPAddr, conn.destAddr) + + err = conn.Close() + assert.NoError(t, err) + + // assert the actual connection was closed + assert.Error(t, clientConn.Close()) + + resolver.AssertExpectations(t) + dialer.AssertExpectations(t) +} diff --git a/exporters/jaeger/uploader.go b/exporters/jaeger/uploader.go new file mode 100644 index 00000000000..452b3aeb5de --- /dev/null +++ b/exporters/jaeger/uploader.go @@ -0,0 +1,313 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" + + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" +) + +// batchUploader send a batch of spans to Jaeger +type batchUploader interface { + upload(context.Context, *gen.Batch) error + shutdown(context.Context) error +} + +type EndpointOption interface { + newBatchUploader() (batchUploader, error) +} + +type endpointOptionFunc func() (batchUploader, error) + +func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) { + return fn() +} + +// WithAgentEndpoint configures the Jaeger exporter to send spans to a jaeger-agent. This will +// use the following environment variables for configuration if no explicit option is provided: +// +// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host +// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port +// +// The passed options will take precedence over any environment variables and default values +// will be used if neither are provided. +func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + cfg := &agentEndpointConfig{ + agentClientUDPParams{ + AttemptReconnecting: true, + Host: envOr(envAgentHost, "localhost"), + Port: envOr(envAgentPort, "6832"), + }, + } + for _, opt := range options { + opt.apply(cfg) + } + + client, err := newAgentClientUDP(cfg.agentClientUDPParams) + if err != nil { + return nil, err + } + + return &agentUploader{client: client}, nil + }) +} + +type AgentEndpointOption interface { + apply(*agentEndpointConfig) +} + +type agentEndpointConfig struct { + agentClientUDPParams +} + +type agentEndpointOptionFunc func(*agentEndpointConfig) + +func (fn agentEndpointOptionFunc) apply(cfg *agentEndpointConfig) { + fn(cfg) +} + +// WithAgentHost sets a host to be used in the agent client endpoint. 
+// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable. +// If this option is not passed and the env var is not set, "localhost" will be used by default. +func WithAgentHost(host string) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.Host = host + }) +} + +// WithAgentPort sets a port to be used in the agent client endpoint. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable. +// If this option is not passed and the env var is not set, "6832" will be used by default. +func WithAgentPort(port string) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.Port = port + }) +} + +// WithLogger sets a logger to be used by agent client. +func WithLogger(logger *log.Logger) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.Logger = logger + }) +} + +// WithDisableAttemptReconnecting sets option to disable reconnecting udp client. +func WithDisableAttemptReconnecting() AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.AttemptReconnecting = false + }) +} + +// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint. +func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.AttemptReconnectInterval = interval + }) +} + +// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent. +func WithMaxPacketSize(size int) AgentEndpointOption { + return agentEndpointOptionFunc(func(o *agentEndpointConfig) { + o.MaxPacketSize = size + }) +} + +// WithCollectorEndpoint defines the full url to the Jaeger HTTP Thrift collector. This will +// use the following environment variables for configuration if no explicit option is provided: +// +// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector. +// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint. +// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint. +// +// The passed options will take precedence over any environment variables. +// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used. +// If neither values are provided for the username or the password, they will not be set since there is no default. +func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + cfg := &collectorEndpointConfig{ + endpoint: envOr(envEndpoint, "http://localhost:14268/api/traces"), + username: envOr(envUser, ""), + password: envOr(envPassword, ""), + httpClient: http.DefaultClient, + } + + for _, opt := range options { + opt.apply(cfg) + } + + return &collectorUploader{ + endpoint: cfg.endpoint, + username: cfg.username, + password: cfg.password, + httpClient: cfg.httpClient, + }, nil + }) +} + +type CollectorEndpointOption interface { + apply(*collectorEndpointConfig) +} + +type collectorEndpointConfig struct { + // endpoint for sending spans directly to a collector. + endpoint string + + // username to be used for authentication with the collector endpoint. + username string + + // password to be used for authentication with the collector endpoint. 
+ password string + + // httpClient to be used to make requests to the collector endpoint. + httpClient *http.Client +} + +type collectorEndpointOptionFunc func(*collectorEndpointConfig) + +func (fn collectorEndpointOptionFunc) apply(cfg *collectorEndpointConfig) { + fn(cfg) +} + +// WithEndpoint is the URL for the Jaeger collector that spans are sent to. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable. +// If this option is not passed and the environment variable is not set, +// "http://localhost:14268/api/traces" will be used by default. +func WithEndpoint(endpoint string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.endpoint = endpoint + }) +} + +// WithUsername sets the username to be used in the authorization header sent for all requests to the collector. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_USER environment variable. +// If this option is not passed and the environment variable is not set, no username will be set. +func WithUsername(username string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.username = username + }) +} + +// WithPassword sets the password to be used in the authorization header sent for all requests to the collector. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_PASSWORD environment variable. +// If this option is not passed and the environment variable is not set, no password will be set. +func WithPassword(password string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.password = password + }) +} + +// WithHTTPClient sets the http client to be used to make request to the collector endpoint. +func WithHTTPClient(client *http.Client) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o *collectorEndpointConfig) { + o.httpClient = client + }) +} + +// agentUploader implements batchUploader interface sending batches to +// Jaeger through the UDP agent. +type agentUploader struct { + client *agentClientUDP +} + +var _ batchUploader = (*agentUploader)(nil) + +func (a *agentUploader) shutdown(ctx context.Context) error { + done := make(chan error, 1) + go func() { + done <- a.client.Close() + }() + + select { + case <-ctx.Done(): + // Prioritize not blocking the calling thread and just leak the + // spawned goroutine to close the client. + return ctx.Err() + case err := <-done: + return err + } +} + +func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error { + return a.client.EmitBatch(ctx, batch) +} + +// collectorUploader implements batchUploader interface sending batches to +// Jaeger through the collector http endpoint. +type collectorUploader struct { + endpoint string + username string + password string + httpClient *http.Client +} + +var _ batchUploader = (*collectorUploader)(nil) + +func (c *collectorUploader) shutdown(ctx context.Context) error { + // The Exporter will cancel any active exports and will prevent all + // subsequent exports, so nothing to do here. 
+ return nil +} + +func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error { + body, err := serialize(batch) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body) + if err != nil { + return err + } + if c.username != "" && c.password != "" { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Content-Type", "application/x-thrift") + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + + _, _ = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) + } + return nil +} + +func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { + buf := thrift.NewTMemoryBuffer() + if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil { + return nil, err + } + return buf.Buffer, nil +} diff --git a/exporters/metric/prometheus/go.mod b/exporters/metric/prometheus/go.mod index 6c514383a2f..b8d7a9ba60b 100644 --- a/exporters/metric/prometheus/go.mod +++ b/exporters/metric/prometheus/go.mod @@ -65,6 +65,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric +replace go.opentelemetry.io/otel/exporters/jaeger => ../../jaeger + +replace go.opentelemetry.io/otel/exporters/prometheus => ../../prometheus + +replace go.opentelemetry.io/otel/exporters/zipkin => ../../zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/metric/prometheus/prometheus.go b/exporters/metric/prometheus/prometheus.go index a1b3ec4d1a9..a743479c1e1 100644 --- a/exporters/metric/prometheus/prometheus.go +++ b/exporters/metric/prometheus/prometheus.go @@ -12,6 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package prometheus provides an OpenTelemetry metrics exporter for +// Prometheus. +// +// Deprecated: Use Prometheus exporter in package +// go.opentelemetry.io/otel/exporters/prometheus. +// +// This package is frozen and no new functionality will be added.
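For users of the deprecated module, the change amounts to an import path swap; a minimal sketch, assuming the replacement module named in the deprecation notice keeps this package's exporter API:

package main

import (
	// Deprecated path, frozen by this change:
	//   "go.opentelemetry.io/otel/exporters/metric/prometheus"
	// Replacement path named in the deprecation notice; blank-imported here
	// only to illustrate the path change.
	_ "go.opentelemetry.io/otel/exporters/prometheus"
)

func main() {}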
package prometheus // import "go.opentelemetry.io/otel/exporters/metric/prometheus" // Note that this package does not support a way to export Prometheus diff --git a/exporters/metric/prometheus/prometheus_test.go b/exporters/metric/prometheus/prometheus_test.go index 1245cd2582a..842db4de0cb 100644 --- a/exporters/metric/prometheus/prometheus_test.go +++ b/exporters/metric/prometheus/prometheus_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/metric/prometheus" + "go.opentelemetry.io/otel/exporters/metric/prometheus" //nolint:staticcheck "go.opentelemetry.io/otel/metric" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" diff --git a/exporters/otlp/otlpmetric/go.mod b/exporters/otlp/otlpmetric/go.mod index fa718e45bd9..6a7a17076b0 100644 --- a/exporters/otlp/otlpmetric/go.mod +++ b/exporters/otlp/otlpmetric/go.mod @@ -73,3 +73,9 @@ replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../trace/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../../internal/tools replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric + +replace go.opentelemetry.io/otel/exporters/jaeger => ../../jaeger + +replace go.opentelemetry.io/otel/exporters/prometheus => ../../prometheus + +replace go.opentelemetry.io/otel/exporters/zipkin => ../../zipkin diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod index 165873cf455..160ea4f4006 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod @@ -71,3 +71,9 @@ replace go.opentelemetry.io/otel/internal/tools => ../../../../internal/tools replace go.opentelemetry.io/otel/sdk/export/metric => ../../../../sdk/export/metric replace go.opentelemetry.io/otel/internal/metric => ../../../../internal/metric + +replace go.opentelemetry.io/otel/exporters/jaeger => ../../../jaeger + +replace go.opentelemetry.io/otel/exporters/prometheus => ../../../prometheus + +replace go.opentelemetry.io/otel/exporters/zipkin => ../../../zipkin diff --git a/exporters/otlp/otlptrace/go.mod b/exporters/otlp/otlptrace/go.mod index 08b92c71d7d..da66f4480e1 100644 --- a/exporters/otlp/otlptrace/go.mod +++ b/exporters/otlp/otlptrace/go.mod @@ -44,7 +44,7 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../../example/promethe replace go.opentelemetry.io/otel/example/zipkin => ../../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../prometheus replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ./ @@ -52,9 +52,9 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ./otl replace go.opentelemetry.io/otel/exporters/stdout => ../../stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../zipkin replace go.opentelemetry.io/otel/internal/tools => ../../../internal/tools @@ -68,6 +68,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ./otl replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric +replace 
go.opentelemetry.io/otel/exporters/metric/prometheus => ../../metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlpmetric/otlpmetricgrpc diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod index f715a2daa58..f459c086c46 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.mod +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod @@ -43,15 +43,15 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../../../example/prome replace go.opentelemetry.io/otel/example/zipkin => ../../../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../../prometheus replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ./ replace go.opentelemetry.io/otel/exporters/stdout => ../../../stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../../jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../../zipkin replace go.opentelemetry.io/otel/internal/tools => ../../../../internal/tools @@ -65,6 +65,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../ot replace go.opentelemetry.io/otel/internal/metric => ../../../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlpmetric/otlpmetricgrpc diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod index e1e8e07efb6..7fa389ca726 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.mod +++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod @@ -34,7 +34,7 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../../../example/prome replace go.opentelemetry.io/otel/example/zipkin => ../../../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../../prometheus replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlptracegrpc @@ -42,9 +42,9 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ./ replace go.opentelemetry.io/otel/exporters/stdout => ../../../stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../../jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../../zipkin replace go.opentelemetry.io/otel/internal/tools => ../../../../internal/tools @@ -62,6 +62,12 @@ replace go.opentelemetry.io/otel/trace => ../../../../trace replace go.opentelemetry.io/otel/internal/metric => 
../../../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlpmetric/otlpmetricgrpc diff --git a/exporters/prometheus/README.md b/exporters/prometheus/README.md new file mode 100644 index 00000000000..ccded2f2829 --- /dev/null +++ b/exporters/prometheus/README.md @@ -0,0 +1,9 @@ +# OpenTelemetry-Go Prometheus Exporter + +OpenTelemetry Prometheus exporter + +## Installation + +``` +go get -u go.opentelemetry.io/otel/exporters/prometheus +``` diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod new file mode 100644 index 00000000000..318ad952d4c --- /dev/null +++ b/exporters/prometheus/go.mod @@ -0,0 +1,77 @@ +module go.opentelemetry.io/otel/exporters/prometheus + +go 1.15 + +require ( + github.com/prometheus/client_golang v1.11.0 + github.com/stretchr/testify v1.7.0 + go.opentelemetry.io/otel v0.20.0 + go.opentelemetry.io/otel/metric v0.20.0 + go.opentelemetry.io/otel/sdk v0.20.0 + go.opentelemetry.io/otel/sdk/export/metric v0.20.0 + go.opentelemetry.io/otel/sdk/metric v0.20.0 +) + +replace go.opentelemetry.io/otel => ../.. + +replace go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus + +replace go.opentelemetry.io/otel/bridge/opentracing => ../../bridge/opentracing + +replace go.opentelemetry.io/otel/example/jaeger => ../../example/jaeger + +replace go.opentelemetry.io/otel/example/namedtracer => ../../example/namedtracer + +replace go.opentelemetry.io/otel/example/opencensus => ../../example/opencensus + +replace go.opentelemetry.io/otel/example/otel-collector => ../../example/otel-collector + +replace go.opentelemetry.io/otel/example/passthrough => ../../example/passthrough + +replace go.opentelemetry.io/otel/example/prom-collector => ../../example/prom-collector + +replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus + +replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin + +replace go.opentelemetry.io/otel/exporters/otlp => ../otlp + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../otlp/otlptrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlp/otlptrace/otlptracegrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlp/otlptrace/otlptracehttp + +replace go.opentelemetry.io/otel/exporters/prometheus => ./ + +replace go.opentelemetry.io/otel/exporters/stdout => ../stdout + +replace go.opentelemetry.io/otel/exporters/jaeger => ../jaeger + +replace go.opentelemetry.io/otel/exporters/zipkin => ../zipkin + +replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric + +replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools + +replace go.opentelemetry.io/otel/metric => ../../metric + +replace go.opentelemetry.io/otel/oteltest => ../../oteltest + +replace go.opentelemetry.io/otel/sdk => ../../sdk + +replace go.opentelemetry.io/otel/sdk/export/metric => ../../sdk/export/metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric + +replace go.opentelemetry.io/otel/trace => ../../trace + +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../metric/prometheus + +replace 
go.opentelemetry.io/otel/exporters/trace/jaeger => ../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../trace/zipkin + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/prometheus/go.sum b/exporters/prometheus/go.sum new file mode 100644 index 00000000000..06f89f08003 --- /dev/null +++ b/exporters/prometheus/go.sum @@ -0,0 +1,150 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ= 
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporters/prometheus/prometheus.go b/exporters/prometheus/prometheus.go new file mode 100644 index 00000000000..c9dbe6c922f --- /dev/null +++ b/exporters/prometheus/prometheus.go @@ -0,0 +1,317 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" + +// Note that this package does not support a way to export Prometheus +// Summary data points, removed in PR#1412. + +import ( + "context" + "fmt" + "net/http" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/number" + export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregation" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" +) + +// Exporter supports Prometheus pulls. It does not implement the +// sdk/export/metric.Exporter interface--instead it creates a pull +// controller and reads the latest checkpointed data on-scrape. +type Exporter struct { + handler http.Handler + + registerer prometheus.Registerer + gatherer prometheus.Gatherer + + // lock protects access to the controller. The controller + // exposes its own lock, but using a dedicated lock in this + // struct allows the exporter to potentially support multiple + // controllers (e.g., with different resources). + lock sync.RWMutex + controller *controller.Controller + + defaultHistogramBoundaries []float64 +} + +// ErrUnsupportedAggregator is returned for unrepresentable aggregator +// types (e.g., exact). +var ErrUnsupportedAggregator = fmt.Errorf("unsupported aggregator type") + +var _ http.Handler = &Exporter{} + +// Config is a set of configs for the tally reporter. 
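+//
+// Any field left unset is defaulted by New: a nil Registry is replaced with a
+// fresh prometheus.NewRegistry(), and a nil Registerer or Gatherer falls back
+// to that Registry.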
+type Config struct { + // Registry is the prometheus registry that will be used as the default Registerer and + // Gatherer if these are not specified. + // + // If not set a new empty Registry is created. + Registry *prometheus.Registry + + // Registerer is the prometheus registerer to register + // metrics with. + // + // If not specified the Registry will be used as default. + Registerer prometheus.Registerer + + // Gatherer is the prometheus gatherer to gather + // metrics with. + // + // If not specified the Registry will be used as default. + Gatherer prometheus.Gatherer + + // DefaultHistogramBoundaries defines the default histogram bucket + // boundaries. + DefaultHistogramBoundaries []float64 +} + +// New returns a new Prometheus exporter using the configured metric +// controller. See controller.New(). +func New(config Config, controller *controller.Controller) (*Exporter, error) { + if config.Registry == nil { + config.Registry = prometheus.NewRegistry() + } + + if config.Registerer == nil { + config.Registerer = config.Registry + } + + if config.Gatherer == nil { + config.Gatherer = config.Registry + } + + e := &Exporter{ + handler: promhttp.HandlerFor(config.Gatherer, promhttp.HandlerOpts{}), + registerer: config.Registerer, + gatherer: config.Gatherer, + controller: controller, + defaultHistogramBoundaries: config.DefaultHistogramBoundaries, + } + + c := &collector{ + exp: e, + } + if err := config.Registerer.Register(c); err != nil { + return nil, fmt.Errorf("cannot register the collector: %w", err) + } + return e, nil +} + +// MeterProvider returns the MeterProvider of this exporter. +func (e *Exporter) MeterProvider() metric.MeterProvider { + return e.controller.MeterProvider() +} + +// Controller returns the controller object that coordinates collection for the SDK. +func (e *Exporter) Controller() *controller.Controller { + e.lock.RLock() + defer e.lock.RUnlock() + return e.controller +} + +// ExportKindFor implements ExportKindSelector. +func (e *Exporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) export.ExportKind { + return export.CumulativeExportKindSelector().ExportKindFor(desc, kind) +} + +// ServeHTTP implements http.Handler. +func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { + e.handler.ServeHTTP(w, r) +} + +// collector implements prometheus.Collector interface. +type collector struct { + exp *Exporter +} + +var _ prometheus.Collector = (*collector)(nil) + +// Describe implements prometheus.Collector. +func (c *collector) Describe(ch chan<- *prometheus.Desc) { + c.exp.lock.RLock() + defer c.exp.lock.RUnlock() + + _ = c.exp.Controller().ForEach(c.exp, func(record export.Record) error { + var labelKeys []string + mergeLabels(record, &labelKeys, nil) + ch <- c.toDesc(record, labelKeys) + return nil + }) +} + +// Collect exports the last calculated CheckpointSet. +// +// Collect is invoked whenever prometheus.Gatherer is also invoked. +// For example, when the HTTP endpoint is invoked by Prometheus. 
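+//
+// Collect read-locks the exporter, triggers a collection on the pull
+// controller, and then converts each exported record: histogram aggregations
+// are emitted as Prometheus histograms, monotonic sums as counters, and
+// non-monotonic sums and last-value aggregations as gauges.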
+func (c *collector) Collect(ch chan<- prometheus.Metric) { + c.exp.lock.RLock() + defer c.exp.lock.RUnlock() + + ctrl := c.exp.Controller() + if err := ctrl.Collect(context.Background()); err != nil { + otel.Handle(err) + } + + err := ctrl.ForEach(c.exp, func(record export.Record) error { + agg := record.Aggregation() + numberKind := record.Descriptor().NumberKind() + instrumentKind := record.Descriptor().InstrumentKind() + + var labelKeys, labels []string + mergeLabels(record, &labelKeys, &labels) + + desc := c.toDesc(record, labelKeys) + + if hist, ok := agg.(aggregation.Histogram); ok { + if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil { + return fmt.Errorf("exporting histogram: %w", err) + } + } else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() { + if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil { + return fmt.Errorf("exporting monotonic counter: %w", err) + } + } else if sum, ok := agg.(aggregation.Sum); ok && !instrumentKind.Monotonic() { + if err := c.exportNonMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil { + return fmt.Errorf("exporting non monotonic counter: %w", err) + } + } else if lastValue, ok := agg.(aggregation.LastValue); ok { + if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil { + return fmt.Errorf("exporting last value: %w", err) + } + } else { + return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind()) + } + return nil + }) + if err != nil { + otel.Handle(err) + } +} + +func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregation.LastValue, kind number.Kind, desc *prometheus.Desc, labels []string) error { + lv, _, err := lvagg.LastValue() + if err != nil { + return fmt.Errorf("error retrieving last value: %w", err) + } + + m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, lv.CoerceToFloat64(kind), labels...) + if err != nil { + return fmt.Errorf("error creating constant metric: %w", err) + } + + ch <- m + return nil +} + +func (c *collector) exportNonMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, labels []string) error { + v, err := sum.Sum() + if err != nil { + return fmt.Errorf("error retrieving counter: %w", err) + } + + m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v.CoerceToFloat64(kind), labels...) + if err != nil { + return fmt.Errorf("error creating constant metric: %w", err) + } + + ch <- m + return nil +} + +func (c *collector) exportMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, labels []string) error { + v, err := sum.Sum() + if err != nil { + return fmt.Errorf("error retrieving counter: %w", err) + } + + m, err := prometheus.NewConstMetric(desc, prometheus.CounterValue, v.CoerceToFloat64(kind), labels...) + if err != nil { + return fmt.Errorf("error creating constant metric: %w", err) + } + + ch <- m + return nil +} + +func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregation.Histogram, kind number.Kind, desc *prometheus.Desc, labels []string) error { + buckets, err := hist.Histogram() + if err != nil { + return fmt.Errorf("error retrieving histogram: %w", err) + } + sum, err := hist.Sum() + if err != nil { + return fmt.Errorf("error retrieving sum: %w", err) + } + + var totalCount uint64 + // counts maps from the bucket upper-bound to the cumulative count. + // The bucket with upper-bound +inf is not included. 
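+	// Prometheus const histograms take cumulative counts keyed by upper bound,
+	// which is why totalCount accumulates as the boundaries are walked.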
+ counts := make(map[float64]uint64, len(buckets.Boundaries)) + for i := range buckets.Boundaries { + boundary := buckets.Boundaries[i] + totalCount += uint64(buckets.Counts[i]) + counts[boundary] = totalCount + } + // Include the +inf bucket in the total count. + totalCount += uint64(buckets.Counts[len(buckets.Counts)-1]) + + m, err := prometheus.NewConstHistogram(desc, totalCount, sum.CoerceToFloat64(kind), counts, labels...) + if err != nil { + return fmt.Errorf("error creating constant histogram: %w", err) + } + + ch <- m + return nil +} + +func (c *collector) toDesc(record export.Record, labelKeys []string) *prometheus.Desc { + desc := record.Descriptor() + return prometheus.NewDesc(sanitize(desc.Name()), desc.Description(), labelKeys, nil) +} + +// mergeLabels merges the export.Record's labels and resources into a +// single set, giving precedence to the record's labels in case of +// duplicate keys. This outputs one or both of the keys and the +// values as a slice, and either argument may be nil to avoid +// allocating an unnecessary slice. +func mergeLabels(record export.Record, keys, values *[]string) { + if keys != nil { + *keys = make([]string, 0, record.Labels().Len()+record.Resource().Len()) + } + if values != nil { + *values = make([]string, 0, record.Labels().Len()+record.Resource().Len()) + } + + // Duplicate keys are resolved by taking the record label value over + // the resource value. + mi := attribute.NewMergeIterator(record.Labels(), record.Resource().Set()) + for mi.Next() { + label := mi.Label() + if keys != nil { + *keys = append(*keys, sanitize(string(label.Key))) + } + if values != nil { + *values = append(*values, label.Value.Emit()) + } + } +} diff --git a/exporters/prometheus/prometheus_test.go b/exporters/prometheus/prometheus_test.go new file mode 100644 index 00000000000..0f3895813c7 --- /dev/null +++ b/exporters/prometheus/prometheus_test.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "context" + "fmt" + "net/http/httptest" + "sort" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/metric" + export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "go.opentelemetry.io/otel/sdk/resource" +) + +type expectedMetric struct { + kind string + name string + help string + values []string +} + +func (e *expectedMetric) lines() []string { + ret := []string{ + fmt.Sprintf("# HELP %s %s", e.name, e.help), + fmt.Sprintf("# TYPE %s %s", e.name, e.kind), + } + + ret = append(ret, e.values...) 
+ + return ret +} + +func expectCounterWithHelp(name, help, value string) expectedMetric { + return expectedMetric{ + kind: "counter", + name: name, + help: help, + values: []string{value}, + } +} + +func expectCounter(name, value string) expectedMetric { + return expectCounterWithHelp(name, "", value) +} + +func expectGauge(name, value string) expectedMetric { + return expectedMetric{ + kind: "gauge", + name: name, + values: []string{value}, + } +} + +func expectHistogram(name string, values ...string) expectedMetric { + return expectedMetric{ + kind: "histogram", + name: name, + values: values, + } +} + +func newPipeline(config prometheus.Config, options ...controller.Option) (*prometheus.Exporter, error) { + c := controller.New( + processor.New( + selector.NewWithHistogramDistribution( + histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), + ), + export.CumulativeExportKindSelector(), + processor.WithMemory(true), + ), + options..., + ) + return prometheus.New(config, c) +} + +func TestPrometheusExporter(t *testing.T) { + exporter, err := newPipeline( + prometheus.Config{ + DefaultHistogramBoundaries: []float64{-0.5, 1}, + }, + controller.WithCollectPeriod(0), + controller.WithResource(resource.NewSchemaless(attribute.String("R", "V"))), + ) + require.NoError(t, err) + + meter := exporter.MeterProvider().Meter("test") + upDownCounter := metric.Must(meter).NewFloat64UpDownCounter("updowncounter") + counter := metric.Must(meter).NewFloat64Counter("counter") + valuerecorder := metric.Must(meter).NewFloat64ValueRecorder("valuerecorder") + + labels := []attribute.KeyValue{ + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), + } + ctx := context.Background() + + var expected []expectedMetric + + counter.Add(ctx, 10, labels...) + counter.Add(ctx, 5.3, labels...) + + expected = append(expected, expectCounter("counter", `counter{A="B",C="D",R="V"} 15.3`)) + + _ = metric.Must(meter).NewInt64ValueObserver("intobserver", func(_ context.Context, result metric.Int64ObserverResult) { + result.Observe(1, labels...) + }) + + expected = append(expected, expectGauge("intobserver", `intobserver{A="B",C="D",R="V"} 1`)) + + valuerecorder.Record(ctx, -0.6, labels...) + valuerecorder.Record(ctx, -0.4, labels...) + valuerecorder.Record(ctx, 0.6, labels...) + valuerecorder.Record(ctx, 20, labels...) + + expected = append(expected, expectHistogram("valuerecorder", + `valuerecorder_bucket{A="B",C="D",R="V",le="-0.5"} 1`, + `valuerecorder_bucket{A="B",C="D",R="V",le="1"} 3`, + `valuerecorder_bucket{A="B",C="D",R="V",le="+Inf"} 4`, + `valuerecorder_sum{A="B",C="D",R="V"} 19.6`, + `valuerecorder_count{A="B",C="D",R="V"} 4`, + )) + + upDownCounter.Add(ctx, 10, labels...) + upDownCounter.Add(ctx, -3.2, labels...) + + expected = append(expected, expectGauge("updowncounter", `updowncounter{A="B",C="D",R="V"} 6.8`)) + + compareExport(t, exporter, expected) + compareExport(t, exporter, expected) +} + +func compareExport(t *testing.T, exporter *prometheus.Exporter, expected []expectedMetric) { + rec := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/metrics", nil) + exporter.ServeHTTP(rec, req) + + output := rec.Body.String() + lines := strings.Split(output, "\n") + + expectedLines := []string{""} + for _, v := range expected { + expectedLines = append(expectedLines, v.lines()...) 
+	}
+
+	sort.Strings(lines)
+	sort.Strings(expectedLines)
+
+	require.Equal(t, expectedLines, lines)
+}
+
+func TestPrometheusStatefulness(t *testing.T) {
+	// Create a meter
+	exporter, err := newPipeline(
+		prometheus.Config{},
+		controller.WithCollectPeriod(0),
+		controller.WithResource(resource.Empty()),
+	)
+	require.NoError(t, err)
+
+	meter := exporter.MeterProvider().Meter("test")
+
+	ctx := context.Background()
+
+	counter := metric.Must(meter).NewInt64Counter(
+		"a.counter",
+		metric.WithDescription("Counts things"),
+	)
+
+	counter.Add(ctx, 100, attribute.String("key", "value"))
+
+	compareExport(t, exporter, []expectedMetric{
+		expectCounterWithHelp("a_counter", "Counts things", `a_counter{key="value"} 100`),
+	})
+
+	counter.Add(ctx, 100, attribute.String("key", "value"))
+
+	compareExport(t, exporter, []expectedMetric{
+		expectCounterWithHelp("a_counter", "Counts things", `a_counter{key="value"} 200`),
+	})
+}
diff --git a/exporters/prometheus/sanitize.go b/exporters/prometheus/sanitize.go
new file mode 100644
index 00000000000..cc9eff358cc
--- /dev/null
+++ b/exporters/prometheus/sanitize.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
+
+import (
+	"strings"
+	"unicode"
+)
+
+// TODO(paivagustavo): we should provide a more uniform and controlled way of sanitizing.
+// Letting users define whether or not we should try to sanitize metric names.
+// This is a copy of sdk/internal/sanitize.go
+
+// sanitize replaces non-alphanumeric characters with underscores and prefixes
+// the result with "key" if it would otherwise start with a digit or an
+// underscore.
+func sanitize(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	// TODO(paivagustavo): change this to use a bytes buffer to avoid a large number of string allocations.
+	s = strings.Map(sanitizeRune, s)
+	if unicode.IsDigit(rune(s[0])) {
+		s = "key_" + s
+	}
+	if s[0] == '_' {
+		s = "key" + s
+	}
+	return s
+}
+
+// sanitizeRune converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	// Everything else turns into an underscore
+	return '_'
+}
diff --git a/exporters/prometheus/sanitize_test.go b/exporters/prometheus/sanitize_test.go
new file mode 100644
index 00000000000..7a6b9fd55ee
--- /dev/null
+++ b/exporters/prometheus/sanitize_test.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "testing" +) + +func TestSanitize(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "replace character", + input: "test/key-1", + want: "test_key_1", + }, + { + name: "add prefix if starting with digit", + input: "0123456789", + want: "key_0123456789", + }, + { + name: "add prefix if starting with _", + input: "_0123456789", + want: "key_0123456789", + }, + { + name: "starts with _ after sanitization", + input: "/0123456789", + want: "key_0123456789", + }, + { + name: "valid input", + input: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789", + want: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, want := sanitize(tt.input), tt.want; got != want { + t.Errorf("sanitize() = %q; want %q", got, want) + } + }) + } +} diff --git a/exporters/stdout/go.mod b/exporters/stdout/go.mod index 546878be3b1..90202e86ba1 100644 --- a/exporters/stdout/go.mod +++ b/exporters/stdout/go.mod @@ -36,13 +36,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../prometheus replace go.opentelemetry.io/otel/exporters/stdout => ./ -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -66,6 +66,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../ot replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/trace/jaeger/doc.go b/exporters/trace/jaeger/doc.go index 57bb0dac8eb..1bcc04b4d4f 100644 --- a/exporters/trace/jaeger/doc.go +++ b/exporters/trace/jaeger/doc.go @@ -14,7 +14,8 @@ // Package jaeger contains an OpenTelemetry tracing exporter for Jaeger. // -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. +// Deprecated: Use Jaeger exporter in package +// go.opentelemetry.io/otel/exporters/jaeger. +// +// This package is frozen and no new functionality will be added. 
package jaeger // import "go.opentelemetry.io/otel/exporters/trace/jaeger" diff --git a/exporters/trace/jaeger/go.mod b/exporters/trace/jaeger/go.mod index 68e502396e4..ef71e39a64a 100644 --- a/exporters/trace/jaeger/go.mod +++ b/exporters/trace/jaeger/go.mod @@ -63,6 +63,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric +replace go.opentelemetry.io/otel/exporters/jaeger => ../../jaeger + +replace go.opentelemetry.io/otel/exporters/prometheus => ../../prometheus + +replace go.opentelemetry.io/otel/exporters/zipkin => ../../zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/trace/zipkin/doc.go b/exporters/trace/zipkin/doc.go index 83a7b7631db..2373372cf7a 100644 --- a/exporters/trace/zipkin/doc.go +++ b/exporters/trace/zipkin/doc.go @@ -14,7 +14,8 @@ // Package zipkin contains an OpenTelemetry tracing exporter for Zipkin. // -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. +// Deprecated: Use Zipkin exporter in package +// go.opentelemetry.io/otel/exporters/zipkin. +// +// This package is frozen and no new functionality will be added. package zipkin // import "go.opentelemetry.io/otel/exporters/trace/zipkin" diff --git a/exporters/trace/zipkin/go.mod b/exporters/trace/zipkin/go.mod index 82dacccbeb6..a4e5878025f 100644 --- a/exporters/trace/zipkin/go.mod +++ b/exporters/trace/zipkin/go.mod @@ -64,6 +64,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric +replace go.opentelemetry.io/otel/exporters/jaeger => ../../jaeger + +replace go.opentelemetry.io/otel/exporters/prometheus => ../../prometheus + +replace go.opentelemetry.io/otel/exporters/zipkin => ../../zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/zipkin/doc.go b/exporters/zipkin/doc.go new file mode 100644 index 00000000000..94683a611ad --- /dev/null +++ b/exporters/zipkin/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zipkin contains an OpenTelemetry tracing exporter for Zipkin. +// +// This package is currently in a pre-GA phase. Backwards incompatible changes +// may be introduced in subsequent minor version releases as we work to track +// the evolving OpenTelemetry specification and user feedback. 
+package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" diff --git a/exporters/zipkin/go.mod b/exporters/zipkin/go.mod new file mode 100644 index 00000000000..e99e9119c62 --- /dev/null +++ b/exporters/zipkin/go.mod @@ -0,0 +1,76 @@ +module go.opentelemetry.io/otel/exporters/zipkin + +go 1.15 + +require ( + github.com/google/go-cmp v0.5.6 + github.com/openzipkin/zipkin-go v0.2.5 + github.com/stretchr/testify v1.7.0 + go.opentelemetry.io/otel v0.20.0 + go.opentelemetry.io/otel/sdk v0.20.0 + go.opentelemetry.io/otel/trace v0.20.0 +) + +replace go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus + +replace go.opentelemetry.io/otel/bridge/opentracing => ../../bridge/opentracing + +replace go.opentelemetry.io/otel/example/jaeger => ../../example/jaeger + +replace go.opentelemetry.io/otel/example/namedtracer => ../../example/namedtracer + +replace go.opentelemetry.io/otel/example/opencensus => ../../example/opencensus + +replace go.opentelemetry.io/otel/example/otel-collector => ../../example/otel-collector + +replace go.opentelemetry.io/otel/example/prom-collector => ../../example/prom-collector + +replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus + +replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin + +replace go.opentelemetry.io/otel/exporters/prometheus => ../prometheus + +replace go.opentelemetry.io/otel/exporters/otlp => ../otlp + +replace go.opentelemetry.io/otel/exporters/stdout => ../stdout + +replace go.opentelemetry.io/otel/exporters/jaeger => ../jaeger + +replace go.opentelemetry.io/otel/exporters/zipkin => ./ + +replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools + +replace go.opentelemetry.io/otel/metric => ../../metric + +replace go.opentelemetry.io/otel/oteltest => ../../oteltest + +replace go.opentelemetry.io/otel/sdk/export/metric => ../../sdk/export/metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric + +replace go.opentelemetry.io/otel/trace => ../../trace + +replace go.opentelemetry.io/otel/example/passthrough => ../../example/passthrough + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../otlp/otlptrace + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlp/otlptrace/otlptracegrpc + +replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlp/otlptrace/otlptracehttp + +replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric + +replace go.opentelemetry.io/otel => ../.. 
+ +replace go.opentelemetry.io/otel/sdk => ../../sdk + +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../trace/zipkin + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric + +replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc diff --git a/exporters/zipkin/go.sum b/exporters/zipkin/go.sum new file mode 100644 index 00000000000..5b4bad914ac --- /dev/null +++ b/exporters/zipkin/go.sum @@ -0,0 +1,108 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/exporters/zipkin/model.go b/exporters/zipkin/model.go new file mode 100644 index 00000000000..55c991fb3ff --- /dev/null +++ b/exporters/zipkin/model.go @@ -0,0 +1,288 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "net" + "strconv" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" + + zkmodel "github.com/openzipkin/zipkin-go/model" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" + tracesdk "go.opentelemetry.io/otel/sdk/trace" +) + +const ( + keyInstrumentationLibraryName = "otel.library.name" + keyInstrumentationLibraryVersion = "otel.library.version" + + keyPeerHostname attribute.Key = "peer.hostname" + keyPeerAddress attribute.Key = "peer.address" +) + +var defaultServiceName string + +func init() { + // fetch service.name from default resource for backup + defaultResource := resource.Default() + if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { + defaultServiceName = value.AsString() + } +} + +func toZipkinSpanModels(batch []tracesdk.ReadOnlySpan) []zkmodel.SpanModel { + models := make([]zkmodel.SpanModel, 0, len(batch)) + for _, data := range batch { + models = append(models, toZipkinSpanModel(data)) + } + return models +} + +func getServiceName(attrs []attribute.KeyValue) string { + for _, kv := range attrs { + if kv.Key == semconv.ServiceNameKey { + return kv.Value.AsString() + } + } + + return defaultServiceName +} + +func toZipkinSpanModel(data tracesdk.ReadOnlySpan) zkmodel.SpanModel { + return zkmodel.SpanModel{ + SpanContext: toZipkinSpanContext(data), + Name: data.Name(), + Kind: toZipkinKind(data.SpanKind()), + Timestamp: data.StartTime(), + Duration: data.EndTime().Sub(data.StartTime()), + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: getServiceName(data.Resource().Attributes()), + 
}, + RemoteEndpoint: toZipkinRemoteEndpoint(data), + Annotations: toZipkinAnnotations(data.Events()), + Tags: toZipkinTags(data), + } +} + +func toZipkinSpanContext(data tracesdk.ReadOnlySpan) zkmodel.SpanContext { + return zkmodel.SpanContext{ + TraceID: toZipkinTraceID(data.SpanContext().TraceID()), + ID: toZipkinID(data.SpanContext().SpanID()), + ParentID: toZipkinParentID(data.Parent().SpanID()), + Debug: false, + Sampled: nil, + Err: nil, + } +} + +func toZipkinTraceID(traceID trace.TraceID) zkmodel.TraceID { + return zkmodel.TraceID{ + High: binary.BigEndian.Uint64(traceID[:8]), + Low: binary.BigEndian.Uint64(traceID[8:]), + } +} + +func toZipkinID(spanID trace.SpanID) zkmodel.ID { + return zkmodel.ID(binary.BigEndian.Uint64(spanID[:])) +} + +func toZipkinParentID(spanID trace.SpanID) *zkmodel.ID { + if spanID.IsValid() { + id := toZipkinID(spanID) + return &id + } + return nil +} + +func toZipkinKind(kind trace.SpanKind) zkmodel.Kind { + switch kind { + case trace.SpanKindUnspecified: + return zkmodel.Undetermined + case trace.SpanKindInternal: + // The spec says we should set the kind to nil, but + // the model does not allow that. + return zkmodel.Undetermined + case trace.SpanKindServer: + return zkmodel.Server + case trace.SpanKindClient: + return zkmodel.Client + case trace.SpanKindProducer: + return zkmodel.Producer + case trace.SpanKindConsumer: + return zkmodel.Consumer + } + return zkmodel.Undetermined +} + +func toZipkinAnnotations(events []tracesdk.Event) []zkmodel.Annotation { + if len(events) == 0 { + return nil + } + annotations := make([]zkmodel.Annotation, 0, len(events)) + for _, event := range events { + value := event.Name + if len(event.Attributes) > 0 { + jsonString := attributesToJSONMapString(event.Attributes) + if jsonString != "" { + value = fmt.Sprintf("%s: %s", event.Name, jsonString) + } + } + annotations = append(annotations, zkmodel.Annotation{ + Timestamp: event.Time, + Value: value, + }) + } + return annotations +} + +func attributesToJSONMapString(attributes []attribute.KeyValue) string { + m := make(map[string]interface{}, len(attributes)) + for _, attribute := range attributes { + m[(string)(attribute.Key)] = attribute.Value.AsInterface() + } + // if an error happens, the result will be an empty string + jsonBytes, _ := json.Marshal(m) + return (string)(jsonBytes) +} + +// extraZipkinTags are those that may be added to every outgoing span +var extraZipkinTags = []string{ + "otel.status_code", + keyInstrumentationLibraryName, + keyInstrumentationLibraryVersion, +} + +func toZipkinTags(data tracesdk.ReadOnlySpan) map[string]string { + attr := data.Attributes() + m := make(map[string]string, len(attr)+len(extraZipkinTags)) + for _, kv := range attr { + switch kv.Value.Type() { + // For array attributes, serialize as JSON list string. + case attribute.ARRAY: + json, _ := json.Marshal(kv.Value.AsArray()) + m[(string)(kv.Key)] = (string)(json) + default: + m[(string)(kv.Key)] = kv.Value.Emit() + } + } + + if data.Status().Code != codes.Unset { + m["otel.status_code"] = data.Status().Code.String() + } + + if data.Status().Code == codes.Error { + m["error"] = data.Status().Description + } else { + delete(m, "error") + } + + if il := data.InstrumentationLibrary(); il.Name != "" { + m[keyInstrumentationLibraryName] = il.Name + if il.Version != "" { + m[keyInstrumentationLibraryVersion] = il.Version + } + } + + if len(m) == 0 { + return nil + } + + return m +} + +// Rank determines selection order for remote endpoint. 
See the specification +// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/trace/sdk_exporters/zipkin.md#otlp---zipkin +var remoteEndpointKeyRank = map[attribute.Key]int{ + semconv.PeerServiceKey: 0, + semconv.NetPeerNameKey: 1, + semconv.NetPeerIPKey: 2, + keyPeerHostname: 3, + keyPeerAddress: 4, + semconv.HTTPHostKey: 5, + semconv.DBNameKey: 6, +} + +func toZipkinRemoteEndpoint(data sdktrace.ReadOnlySpan) *zkmodel.Endpoint { + // Should be set only for client or producer kind + if sk := data.SpanKind(); sk != trace.SpanKindClient && sk != trace.SpanKindProducer { + return nil + } + + attr := data.Attributes() + var endpointAttr attribute.KeyValue + for _, kv := range attr { + rank, ok := remoteEndpointKeyRank[kv.Key] + if !ok { + continue + } + + currentKeyRank, ok := remoteEndpointKeyRank[endpointAttr.Key] + if ok && rank < currentKeyRank { + endpointAttr = kv + } else if !ok { + endpointAttr = kv + } + } + + if endpointAttr.Key == "" { + return nil + } + + if endpointAttr.Key != semconv.NetPeerIPKey && + endpointAttr.Value.Type() == attribute.STRING { + return &zkmodel.Endpoint{ + ServiceName: endpointAttr.Value.AsString(), + } + } + + return remoteEndpointPeerIPWithPort(endpointAttr.Value.AsString(), attr) +} + +// Handles `net.peer.ip` remote endpoint separately (should include `net.peer.port` +// as well, if available). +func remoteEndpointPeerIPWithPort(peerIP string, attrs []attribute.KeyValue) *zkmodel.Endpoint { + ip := net.ParseIP(peerIP) + if ip == nil { + return nil + } + + endpoint := &zkmodel.Endpoint{} + // Determine if IPv4 or IPv6 + if ip.To4() != nil { + endpoint.IPv4 = ip + } else { + endpoint.IPv6 = ip + } + + for _, kv := range attrs { + if kv.Key == semconv.NetPeerPortKey { + port, _ := strconv.ParseUint(kv.Value.Emit(), 10, 16) + endpoint.Port = uint16(port) + return endpoint + } + } + + return endpoint +} diff --git a/exporters/zipkin/model_test.go b/exporters/zipkin/model_test.go new file mode 100644 index 00000000000..970040a0a1d --- /dev/null +++ b/exporters/zipkin/model_test.go @@ -0,0 +1,982 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zipkin + +import ( + "fmt" + "net" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + zkmodel "github.com/openzipkin/zipkin-go/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" +) + +func TestModelConversion(t *testing.T) { + resource := resource.NewSchemaless( + semconv.ServiceNameKey.String("model-test"), + ) + + inputBatch := tracetest.SpanStubs{ + // typical span data + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindServer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + attribute.Array("attr3", []int{0, 1, 2}), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // span data with no parent (same as typical, but has + // invalid parent) + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + SpanKind: trace.SpanKindServer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // span data of unspecified kind + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 
0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindUnspecified, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // span data of internal kind + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindInternal, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // span data of client kind + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindClient, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + attribute.String("peer.hostname", "test-peer-hostname"), + attribute.String("net.peer.ip", "1.2.3.4"), + attribute.Int64("net.peer.port", 9876), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + 
}, + Resource: resource, + }, + // span data of producer kind + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindProducer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // span data of consumer kind + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindConsumer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // span data with no events + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindServer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.Int64("attr1", 42), + attribute.String("attr2", "bar"), + }, + Events: nil, + Status: tracesdk.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: 
resource, + }, + // span data with an "error" attribute set to "false" + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + }), + SpanKind: trace.SpanKindServer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: []attribute.KeyValue{ + attribute.String("error", "false"), + }, + Events: []tracesdk.Event{ + { + Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Name: "ev1", + Attributes: []attribute.KeyValue{ + attribute.Int64("eventattr1", 123), + }, + }, + { + Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Name: "ev2", + Attributes: nil, + }, + }, + Resource: resource, + }, + }.Snapshots() + + expectedOutputBatch := []zkmodel.SpanModel{ + // model for typical span data + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "SERVER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "attr3": "[0,1,2]", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data with no parent + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: nil, + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "SERVER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data of unspecified kind + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + 
LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data of internal kind + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data of client kind + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "CLIENT", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: &zkmodel.Endpoint{ + IPv4: net.ParseIP("1.2.3.4"), + Port: 9876, + }, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "net.peer.ip": "1.2.3.4", + "net.peer.port": "9876", + "peer.hostname": "test-peer-hostname", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data of producer kind + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "PRODUCER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data of consumer kind + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 
0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "CONSUMER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data with no events + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "SERVER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: nil, + Tags: map[string]string{ + "attr1": "42", + "attr2": "bar", + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model for span data with an "error" attribute set to "false" + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "SERVER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "model-test", + }, + RemoteEndpoint: nil, + Annotations: []zkmodel.Annotation{ + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), + Value: `ev1: {"eventattr1":123}`, + }, + { + Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Value: "ev2", + }, + }, + Tags: nil, // should be omitted + }, + } + gottenOutputBatch := toZipkinSpanModels(inputBatch) + require.Equal(t, expectedOutputBatch, gottenOutputBatch) +} + +func zkmodelIDPtr(n uint64) *zkmodel.ID { + id := zkmodel.ID(n) + return &id +} + +func TestTagsTransformation(t *testing.T) { + keyValue := "value" + doubleValue := 123.456 + uintValue := int64(123) + statusMessage := "this is a problem" + instrLibName := "instrumentation-library" + instrLibVersion := "semver:1.0.0" + + tests := []struct { + name string + data tracetest.SpanStub + want map[string]string + }{ + { + name: "attributes", + data: tracetest.SpanStub{ + Attributes: []attribute.KeyValue{ + attribute.String("key", keyValue), + attribute.Float64("double", doubleValue), + attribute.Int64("uint", uintValue), + attribute.Bool("ok", true), + }, + }, + want: map[string]string{ + "double": fmt.Sprint(doubleValue), + "key": keyValue, + "ok": "true", + "uint": strconv.FormatInt(uintValue, 10), + }, + }, + { + name: "no attributes", + data: tracetest.SpanStub{}, + want: nil, + }, + { + name: "omit-noerror", + data: tracetest.SpanStub{ + Attributes: []attribute.KeyValue{ + attribute.Bool("error", false), + }, + }, + want: nil, + }, + { + name: 
"statusCode", + data: tracetest.SpanStub{ + Attributes: []attribute.KeyValue{ + attribute.String("key", keyValue), + attribute.Bool("error", true), + }, + Status: tracesdk.Status{ + Code: codes.Error, + Description: statusMessage, + }, + }, + want: map[string]string{ + "error": statusMessage, + "key": keyValue, + "otel.status_code": codes.Error.String(), + }, + }, + { + name: "instrLib-empty", + data: tracetest.SpanStub{ + InstrumentationLibrary: instrumentation.Library{}, + }, + want: nil, + }, + { + name: "instrLib-noversion", + data: tracetest.SpanStub{ + Attributes: []attribute.KeyValue{}, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + }, + }, + want: map[string]string{ + "otel.library.name": instrLibName, + }, + }, + { + name: "instrLib-with-version", + data: tracetest.SpanStub{ + Attributes: []attribute.KeyValue{}, + InstrumentationLibrary: instrumentation.Library{ + Name: instrLibName, + Version: instrLibVersion, + }, + }, + want: map[string]string{ + "otel.library.name": instrLibName, + "otel.library.version": instrLibVersion, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := toZipkinTags(tt.data.Snapshot()) + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("Diff%v", diff) + } + }) + } +} + +func TestRemoteEndpointTransformation(t *testing.T) { + tests := []struct { + name string + data tracetest.SpanStub + want *zkmodel.Endpoint + }{ + { + name: "nil-not-applicable", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindClient, + Attributes: []attribute.KeyValue{}, + }, + want: nil, + }, + { + name: "nil-not-found", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindConsumer, + Attributes: []attribute.KeyValue{ + attribute.String("attr", "test"), + }, + }, + want: nil, + }, + { + name: "peer-service-rank", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + semconv.PeerServiceKey.String("peer-service-test"), + semconv.NetPeerNameKey.String("peer-name-test"), + semconv.HTTPHostKey.String("http-host-test"), + }, + }, + want: &zkmodel.Endpoint{ + ServiceName: "peer-service-test", + }, + }, + { + name: "http-host-rank", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + semconv.HTTPHostKey.String("http-host-test"), + semconv.DBNameKey.String("db-name-test"), + }, + }, + want: &zkmodel.Endpoint{ + ServiceName: "http-host-test", + }, + }, + { + name: "db-name-rank", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + attribute.String("foo", "bar"), + semconv.DBNameKey.String("db-name-test"), + }, + }, + want: &zkmodel.Endpoint{ + ServiceName: "db-name-test", + }, + }, + { + name: "peer-hostname-rank", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + keyPeerHostname.String("peer-hostname-test"), + keyPeerAddress.String("peer-address-test"), + semconv.HTTPHostKey.String("http-host-test"), + semconv.DBNameKey.String("http-host-test"), + }, + }, + want: &zkmodel.Endpoint{ + ServiceName: "peer-hostname-test", + }, + }, + { + name: "peer-address-rank", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + keyPeerAddress.String("peer-address-test"), + semconv.HTTPHostKey.String("http-host-test"), + semconv.DBNameKey.String("http-host-test"), + }, + }, + want: &zkmodel.Endpoint{ + ServiceName: "peer-address-test", + }, + }, + { + name: "net-peer-invalid-ip", + data: 
tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + semconv.NetPeerIPKey.String("INVALID"), + }, + }, + want: nil, + }, + { + name: "net-peer-ipv6-no-port", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + semconv.NetPeerIPKey.String("0:0:1:5ee:bad:c0de:0:0"), + }, + }, + want: &zkmodel.Endpoint{ + IPv6: net.ParseIP("0:0:1:5ee:bad:c0de:0:0"), + }, + }, + { + name: "net-peer-ipv4-port", + data: tracetest.SpanStub{ + SpanKind: trace.SpanKindProducer, + Attributes: []attribute.KeyValue{ + semconv.NetPeerIPKey.String("1.2.3.4"), + semconv.NetPeerPortKey.Int(9876), + }, + }, + want: &zkmodel.Endpoint{ + IPv4: net.ParseIP("1.2.3.4"), + Port: 9876, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := toZipkinRemoteEndpoint(tt.data.Snapshot()) + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("Diff%v", diff) + } + }) + } +} + +func TestServiceName(t *testing.T) { + attrs := []attribute.KeyValue{} + assert.Equal(t, defaultServiceName, getServiceName(attrs)) + + attrs = append(attrs, attribute.String("test_key", "test_value")) + assert.Equal(t, defaultServiceName, getServiceName(attrs)) + + attrs = append(attrs, semconv.ServiceNameKey.String("my_service")) + assert.Equal(t, "my_service", getServiceName(attrs)) +} diff --git a/exporters/zipkin/zipkin.go b/exporters/zipkin/zipkin.go new file mode 100644 index 00000000000..5d09a47355a --- /dev/null +++ b/exporters/zipkin/zipkin.go @@ -0,0 +1,185 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "sync" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// Exporter exports spans to the zipkin collector. +type Exporter struct { + url string + client *http.Client + logger *log.Logger + config config + + stoppedMu sync.RWMutex + stopped bool +} + +var ( + _ sdktrace.SpanExporter = &Exporter{} +) + +// Options contains configuration for the exporter. +type config struct { + client *http.Client + logger *log.Logger + tpOpts []sdktrace.TracerProviderOption +} + +// Option defines a function that configures the exporter. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (fn optionFunc) apply(cfg *config) { + fn(cfg) +} + +// WithLogger configures the exporter to use the passed logger. +func WithLogger(logger *log.Logger) Option { + return optionFunc(func(cfg *config) { + cfg.logger = logger + }) +} + +// WithClient configures the exporter to use the passed HTTP client. +func WithClient(client *http.Client) Option { + return optionFunc(func(cfg *config) { + cfg.client = client + }) +} + +// WithSDKOptions configures options passed to the created TracerProvider. 
+func WithSDKOptions(tpOpts ...sdktrace.TracerProviderOption) Option { + return optionFunc(func(cfg *config) { + cfg.tpOpts = tpOpts + }) +} + +// New creates a new Zipkin exporter. +func New(collectorURL string, opts ...Option) (*Exporter, error) { + if collectorURL == "" { + return nil, errors.New("collector URL cannot be empty") + } + u, err := url.Parse(collectorURL) + if err != nil { + return nil, fmt.Errorf("invalid collector URL: %v", err) + } + if u.Scheme == "" || u.Host == "" { + return nil, errors.New("invalid collector URL") + } + + cfg := config{} + for _, opt := range opts { + opt.apply(&cfg) + } + if cfg.client == nil { + cfg.client = http.DefaultClient + } + return &Exporter{ + url: collectorURL, + client: cfg.client, + logger: cfg.logger, + config: cfg, + }, nil +} + +// ExportSpans exports spans to a Zipkin receiver. +func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + e.stoppedMu.RLock() + stopped := e.stopped + e.stoppedMu.RUnlock() + if stopped { + e.logf("exporter stopped, not exporting span batch") + return nil + } + + if len(spans) == 0 { + e.logf("no spans to export") + return nil + } + models := toZipkinSpanModels(spans) + body, err := json.Marshal(models) + if err != nil { + return e.errf("failed to serialize zipkin models to JSON: %v", err) + } + e.logf("about to send a POST request to %s with body %s", e.url, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, e.url, bytes.NewBuffer(body)) + if err != nil { + return e.errf("failed to create request to %s: %v", e.url, err) + } + req.Header.Set("Content-Type", "application/json") + resp, err := e.client.Do(req) + if err != nil { + return e.errf("request to %s failed: %v", e.url, err) + } + defer resp.Body.Close() + + // Zipkin API returns a 202 on success and the content of the body isn't interesting + // but it is still being read because according to https://golang.org/pkg/net/http/#Response + // > The default HTTP client's Transport may not reuse HTTP/1.x "keep-alive" TCP connections + // > if the Body is not read to completion and closed. + _, err = io.Copy(ioutil.Discard, resp.Body) + if err != nil { + return e.errf("failed to read response body: %v", err) + } + + if resp.StatusCode != http.StatusAccepted { + return e.errf("failed to send spans to zipkin server with status %d", resp.StatusCode) + } + + return nil +} + +// Shutdown stops the exporter flushing any pending exports. +func (e *Exporter) Shutdown(ctx context.Context) error { + e.stoppedMu.Lock() + e.stopped = true + e.stoppedMu.Unlock() + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil +} + +func (e *Exporter) logf(format string, args ...interface{}) { + if e.logger != nil { + e.logger.Printf(format, args...) + } +} + +func (e *Exporter) errf(format string, args ...interface{}) error { + e.logf(format, args...) + return fmt.Errorf(format, args...) +} diff --git a/exporters/zipkin/zipkin_test.go b/exporters/zipkin/zipkin_test.go new file mode 100644 index 00000000000..bac8cdc2950 --- /dev/null +++ b/exporters/zipkin/zipkin_test.go @@ -0,0 +1,338 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zipkin + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "sync" + "testing" + "time" + + zkmodel "github.com/openzipkin/zipkin-go/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + collectorURL = "http://localhost:9411/api/v2/spans" +) + +func TestNewRawExporter(t *testing.T) { + _, err := New( + collectorURL, + ) + + assert.NoError(t, err) +} + +func TestNewRawExporterShouldFailInvalidCollectorURL(t *testing.T) { + var ( + exp *Exporter + err error + ) + + // cannot be empty + exp, err = New( + "", + ) + + assert.Error(t, err) + assert.EqualError(t, err, "collector URL cannot be empty") + assert.Nil(t, exp) + + // invalid URL + exp, err = New( + "localhost", + ) + + assert.Error(t, err) + assert.EqualError(t, err, "invalid collector URL") + assert.Nil(t, exp) +} + +type mockZipkinCollector struct { + t *testing.T + url string + closing bool + server *http.Server + wg *sync.WaitGroup + + lock sync.RWMutex + models []zkmodel.SpanModel +} + +func startMockZipkinCollector(t *testing.T) *mockZipkinCollector { + collector := &mockZipkinCollector{ + t: t, + closing: false, + } + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + collector.url = fmt.Sprintf("http://%s", listener.Addr().String()) + server := &http.Server{ + Handler: http.HandlerFunc(collector.handler), + } + collector.server = server + wg := &sync.WaitGroup{} + wg.Add(1) + collector.wg = wg + go func() { + err := server.Serve(listener) + require.True(t, collector.closing) + require.Equal(t, http.ErrServerClosed, err) + wg.Done() + }() + + return collector +} + +func (c *mockZipkinCollector) handler(w http.ResponseWriter, r *http.Request) { + jsonBytes, err := ioutil.ReadAll(r.Body) + require.NoError(c.t, err) + var models []zkmodel.SpanModel + err = json.Unmarshal(jsonBytes, &models) + require.NoError(c.t, err) + // for some reason we may get the nonUTC timestamps in models, + // fix that + for midx := range models { + models[midx].Timestamp = models[midx].Timestamp.UTC() + for aidx := range models[midx].Annotations { + models[midx].Annotations[aidx].Timestamp = models[midx].Annotations[aidx].Timestamp.UTC() + } + } + c.lock.Lock() + defer c.lock.Unlock() + c.models = append(c.models, models...) 
+ w.WriteHeader(http.StatusAccepted) +} + +func (c *mockZipkinCollector) Close() { + if c.closing { + return + } + c.closing = true + server := c.server + c.server = nil + require.NoError(c.t, server.Shutdown(context.Background())) + c.wg.Wait() +} + +func (c *mockZipkinCollector) ModelsLen() int { + c.lock.RLock() + defer c.lock.RUnlock() + return len(c.models) +} + +func (c *mockZipkinCollector) StealModels() []zkmodel.SpanModel { + c.lock.Lock() + defer c.lock.Unlock() + models := c.models + c.models = nil + return models +} + +type logStore struct { + T *testing.T + Messages []string +} + +func (s *logStore) Write(p []byte) (n int, err error) { + msg := (string)(p) + if s.T != nil { + s.T.Logf("%s", msg) + } + s.Messages = append(s.Messages, msg) + return len(p), nil +} + +func logStoreLogger(s *logStore) *log.Logger { + return log.New(s, "", 0) +} + +func TestExportSpans(t *testing.T) { + resource := resource.NewSchemaless( + semconv.ServiceNameKey.String("exporter-test"), + ) + + spans := tracetest.SpanStubs{ + // parent + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + SpanKind: trace.SpanKindServer, + Name: "foo", + StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), + Attributes: nil, + Events: nil, + Status: sdktrace.Status{ + Code: codes.Error, + Description: "404, file not found", + }, + Resource: resource, + }, + // child + { + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8}, + }), + Parent: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + }), + SpanKind: trace.SpanKindServer, + Name: "bar", + StartTime: time.Date(2020, time.March, 11, 19, 24, 15, 0, time.UTC), + EndTime: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), + Attributes: nil, + Events: nil, + Status: sdktrace.Status{ + Code: codes.Error, + Description: "403, forbidden", + }, + Resource: resource, + }, + }.Snapshots() + models := []zkmodel.SpanModel{ + // model of parent + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xfffefdfcfbfaf9f8), + ParentID: nil, + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "foo", + Kind: "SERVER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), + Duration: time.Minute, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "exporter-test", + }, + RemoteEndpoint: nil, + Annotations: nil, + Tags: map[string]string{ + "otel.status_code": "Error", + "error": "404, file not found", + }, + }, + // model of child + { + SpanContext: zkmodel.SpanContext{ + TraceID: zkmodel.TraceID{ + High: 0x001020304050607, + Low: 0x8090a0b0c0d0e0f, + }, + ID: zkmodel.ID(0xdfdedddcdbdad9d8), + ParentID: zkmodelIDPtr(0xfffefdfcfbfaf9f8), + Debug: false, + Sampled: nil, + Err: nil, + }, + Name: "bar", + Kind: "SERVER", + Timestamp: time.Date(2020, time.March, 11, 19, 24, 15, 0, 
time.UTC), + Duration: 30 * time.Second, + Shared: false, + LocalEndpoint: &zkmodel.Endpoint{ + ServiceName: "exporter-test", + }, + RemoteEndpoint: nil, + Annotations: nil, + Tags: map[string]string{ + "otel.status_code": "Error", + "error": "403, forbidden", + }, + }, + } + require.Len(t, models, len(spans)) + collector := startMockZipkinCollector(t) + defer collector.Close() + ls := &logStore{T: t} + logger := logStoreLogger(ls) + exporter, err := New(collector.url, WithLogger(logger)) + require.NoError(t, err) + ctx := context.Background() + require.Len(t, ls.Messages, 0) + require.NoError(t, exporter.ExportSpans(ctx, spans[0:1])) + require.Len(t, ls.Messages, 1) + require.Contains(t, ls.Messages[0], "send a POST request") + ls.Messages = nil + require.NoError(t, exporter.ExportSpans(ctx, nil)) + require.Len(t, ls.Messages, 1) + require.Contains(t, ls.Messages[0], "no spans to export") + ls.Messages = nil + require.NoError(t, exporter.ExportSpans(ctx, spans[1:2])) + require.Contains(t, ls.Messages[0], "send a POST request") + checkFunc := func() bool { + return collector.ModelsLen() == len(models) + } + require.Eventually(t, checkFunc, time.Second, 10*time.Millisecond) + require.Equal(t, models, collector.StealModels()) +} + +func TestExporterShutdownHonorsTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + exp, err := New(collectorURL) + require.NoError(t, err) + + innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) + defer innerCancel() + <-innerCtx.Done() + assert.Errorf(t, exp.Shutdown(innerCtx), context.DeadlineExceeded.Error()) +} + +func TestExporterShutdownHonorsCancel(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + exp, err := New(collectorURL) + require.NoError(t, err) + + innerCtx, innerCancel := context.WithCancel(ctx) + innerCancel() + assert.Errorf(t, exp.Shutdown(innerCtx), context.Canceled.Error()) +} + +func TestErrorOnExportShutdownExporter(t *testing.T) { + exp, err := New(collectorURL) + require.NoError(t, err) + assert.NoError(t, exp.Shutdown(context.Background())) + assert.NoError(t, exp.ExportSpans(context.Background(), nil)) +} diff --git a/go.mod b/go.mod index 07a32c37302..6b16d557852 100644 --- a/go.mod +++ b/go.mod @@ -29,13 +29,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ./example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ./example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ./exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ./exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ./exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ./exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ./exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ./exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ./exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ./internal/tools @@ -61,6 +61,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ./exp replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ./exporters/otlp/otlptrace/otlptracehttp +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ./exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ./exporters/trace/jaeger + +replace 
go.opentelemetry.io/otel/exporters/trace/zipkin => ./exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ./exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ./exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/internal/metric/go.mod b/internal/metric/go.mod index f7011d84894..aafcfac89eb 100644 --- a/internal/metric/go.mod +++ b/internal/metric/go.mod @@ -34,7 +34,7 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../../exporters/otlp/otlptrace @@ -44,9 +44,9 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../tools @@ -60,6 +60,12 @@ replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../trace +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 4c9403b937f..b70f47eb12e 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -32,13 +32,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ./ @@ -64,6 +64,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. 
replace go.opentelemetry.io/otel/internal/metric => ../metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/metric/go.mod b/metric/go.mod index c2604669e1a..deb5119d23e 100644 --- a/metric/go.mod +++ b/metric/go.mod @@ -22,13 +22,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../internal/tools @@ -61,6 +61,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../ex replace go.opentelemetry.io/otel/internal/metric => ../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/oteltest/go.mod b/oteltest/go.mod index 3c105360b26..ca82fb70774 100644 --- a/oteltest/go.mod +++ b/oteltest/go.mod @@ -22,13 +22,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../internal/tools @@ -60,6 +60,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../ex replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => 
../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/sdk/export/metric/go.mod b/sdk/export/metric/go.mod index 159146789e6..24075f6514b 100644 --- a/sdk/export/metric/go.mod +++ b/sdk/export/metric/go.mod @@ -22,13 +22,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../../example/promethe replace go.opentelemetry.io/otel/example/zipkin => ../../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../../internal/tools @@ -61,6 +61,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/sdk/go.mod b/sdk/go.mod index 67e7329518f..f051a45f905 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -30,13 +30,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../internal/tools @@ -62,6 +62,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../ex replace go.opentelemetry.io/otel/internal/metric => ../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git 
a/sdk/metric/go.mod b/sdk/metric/go.mod index 740532b7c4c..8bf4ad7b121 100644 --- a/sdk/metric/go.mod +++ b/sdk/metric/go.mod @@ -22,13 +22,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../../internal/tools @@ -64,6 +64,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../.. replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/trace/go.mod b/trace/go.mod index bfbce93a8c2..deb9c249144 100644 --- a/trace/go.mod +++ b/trace/go.mod @@ -22,13 +22,13 @@ replace go.opentelemetry.io/otel/example/prometheus => ../example/prometheus replace go.opentelemetry.io/otel/example/zipkin => ../example/zipkin -replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus +replace go.opentelemetry.io/otel/exporters/prometheus => ../exporters/prometheus replace go.opentelemetry.io/otel/exporters/stdout => ../exporters/stdout -replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger +replace go.opentelemetry.io/otel/exporters/jaeger => ../exporters/jaeger -replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin +replace go.opentelemetry.io/otel/exporters/zipkin => ../exporters/zipkin replace go.opentelemetry.io/otel/internal/tools => ../internal/tools @@ -60,6 +60,12 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../ex replace go.opentelemetry.io/otel/internal/metric => ../internal/metric +replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../exporters/metric/prometheus + +replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../exporters/trace/jaeger + +replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../exporters/trace/zipkin + replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc diff --git a/website_docs/exporting_data.md b/website_docs/exporting_data.md index 0d4cab5b18f..c01dc0c460e 100644 --- a/website_docs/exporting_data.md +++ b/website_docs/exporting_data.md @@ -53,12 +53,12 @@ Please find more documentation on [GitHub](https://github.com/open-telemetry/ope # Jaeger Exporter -Jaeger export is available 
in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. +Jaeger export is available in the `go.opentelemetry.io/otel/exporters/jaeger` package. -Please find more documentation on [GitHub](https://github.com/open-telemetry/opentelemetry-go/tree/main/exporters/trace/jaeger) +Please find more documentation on [GitHub](https://github.com/open-telemetry/opentelemetry-go/tree/main/exporters/jaeger) # Prometheus Exporter -Prometheus export is available in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. +Prometheus export is available in the `go.opentelemetry.io/otel/exporters/prometheus` package. -Please find more documentation on [GitHub](https://github.com/open-telemetry/opentelemetry-go/tree/main/exporters/metric/prometheus) +Please find more documentation on [GitHub](https://github.com/open-telemetry/opentelemetry-go/tree/main/exporters/prometheus)
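For reference, the relocated exporters keep the same construction pattern as before; only the import paths change. Below is a minimal sketch of wiring the new `go.opentelemetry.io/otel/exporters/zipkin` package (whose `New`, `WithLogger`, and `Shutdown` APIs are added by this patch) into a tracer provider. The collector URL, the batching span processor, and the global provider registration are assumptions about the surrounding application, not part of this change:

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/zipkin"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Build the exporter against a Zipkin collector. The URL is an example;
	// point it at your own collector's /api/v2/spans endpoint.
	exp, err := zipkin.New(
		"http://localhost:9411/api/v2/spans",
		zipkin.WithLogger(log.New(os.Stderr, "zipkin-export ", log.Ldate|log.Ltime)),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Register the exporter behind a batching span processor and install the
	// provider globally so instrumented code picks it up.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp)

	// Shut down the provider on exit to flush any buffered spans.
	defer func() {
		if err := tp.Shutdown(context.Background()); err != nil {
			log.Fatal(err)
		}
	}()
}
```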